File size: 36,031 Bytes
1c40afa
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
1c40afa
 
 
 
 
 
 
 
 
 
 
f16b6cd
 
 
 
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
1c40afa
 
f16b6cd
1c40afa
 
 
 
f16b6cd
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
 
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
 
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
 
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
1c40afa
 
f16b6cd
1c40afa
 
f16b6cd
 
1c40afa
 
 
 
f16b6cd
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
 
1c40afa
 
 
 
f16b6cd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1c40afa
f16b6cd
 
1c40afa
 
f16b6cd
 
 
 
 
 
 
 
 
 
 
1c40afa
f16b6cd
 
 
 
1c40afa
f16b6cd
 
1c40afa
f16b6cd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1c40afa
f16b6cd
 
 
1c40afa
f16b6cd
 
 
1c40afa
f16b6cd
 
1c40afa
f16b6cd
 
 
1c40afa
f16b6cd
 
 
1c40afa
f16b6cd
 
 
 
 
 
 
 
 
1c40afa
f16b6cd
 
 
1c40afa
 
 
f16b6cd
 
 
 
 
 
 
 
 
 
 
 
 
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
 
 
 
 
 
 
 
 
 
 
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
1c40afa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f16b6cd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
import gradio as gr
import boto3
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import re
import logging
import os
from PIL import Image
import io
import PyPDF2
import secrets

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# AWS credentials for the Bedrock API (Claude).
# For HuggingFace Spaces, set these as secrets in the Space settings.
AWS_ACCESS_KEY = os.getenv("AWS_ACCESS_KEY", "")
AWS_SECRET_KEY = os.getenv("AWS_SECRET_KEY", "")
AWS_REGION = os.getenv("AWS_REGION", "us-east-1")

# If we're on HuggingFace Spaces, use the HF_TOKEN (if available).
# NOTE(review): USE_HF_INFERENCE is computed but never read anywhere in this
# file — confirm whether it is still needed.
HF_TOKEN = os.getenv("HF_TOKEN", "")
USE_HF_INFERENCE = bool(HF_TOKEN) and len(HF_TOKEN) > 0

# Initialize the Bedrock runtime client once at import time. It stays None
# when credentials are missing or client creation fails, which switches the
# whole app into demo mode (see analyze_transcript / call_hf_inference).
bedrock_client = None
if AWS_ACCESS_KEY and AWS_SECRET_KEY:
    try:
        bedrock_client = boto3.client(
            'bedrock-runtime',
            aws_access_key_id=AWS_ACCESS_KEY,
            aws_secret_access_key=AWS_SECRET_KEY,
            region_name=AWS_REGION
        )
        logger.info("Bedrock client initialized successfully")
    except Exception as e:
        logger.error(f"Failed to initialize Bedrock client: {str(e)}")

# Sample transcript for the demo, in CHAT format: *PAR: marks patient
# utterances, &-um marks fillers, [//] revisions, [/] repetitions,
# [: word] intended targets, and [*] errors.
SAMPLE_TRANSCRIPT = """*PAR: today I would &-um like to talk about &-um a fun trip I took last &-um summer with my family.
*PAR: we went to the &-um &-um beach [//] no to the mountains [//] I mean the beach actually.
*PAR: there was lots of &-um &-um swimming and &-um sun.
*PAR: we [/] we stayed for &-um three no [//] four days in a &-um hotel near the water [: ocean] [*].
*PAR: my favorite part was &-um building &-um castles with sand.
*PAR: sometimes I forget [//] forgetted [: forgot] [*] what they call those things we built.
*PAR: my brother he [//] he helped me dig a big hole.
*PAR: we saw [/] saw fishies [: fish] [*] swimming in the water.
*PAR: sometimes I wonder [/] wonder where fishies [: fish] [*] go when it's cold.
*PAR: maybe they have [/] have houses under the water.
*PAR: after swimming we [//] I eat [: ate] [*] &-um ice cream with &-um chocolate things on top.
*PAR: what do you call those &-um &-um sprinkles! that's the word.
*PAR: my mom said to &-um that I could have &-um two scoops next time.
*PAR: I want to go back to the beach [/] beach next year."""

def read_pdf(file_path):
    """Extract and concatenate the text of every page in a PDF file.

    Args:
        file_path: Path to the PDF file on disk.

    Returns:
        The concatenated page text, or "" if the file cannot be opened or
        parsed (the error is logged, not raised).
    """
    try:
        with open(file_path, 'rb') as file:
            pdf_reader = PyPDF2.PdfReader(file)
            # extract_text() can return None for pages with no extractable
            # text (older PyPDF2 releases); coalesce to "" so the join
            # never raises TypeError. join also avoids quadratic +=.
            return "".join(page.extract_text() or "" for page in pdf_reader.pages)
    except Exception as e:
        logger.error(f"Error reading PDF: {str(e)}")
        return ""

def call_bedrock(prompt):
    """Send a single-turn prompt to Claude 3 Sonnet via AWS Bedrock.

    Returns the model's reply text, or a human-readable error string when
    credentials are missing or the invocation fails (never raises).
    """
    if not bedrock_client:
        return "AWS credentials not configured. Please set your AWS credentials as secrets in the HuggingFace Space settings."

    try:
        # Anthropic messages-API payload: one user turn, mildly creative
        # sampling (temperature 0.3, top_p 0.9).
        request_payload = {
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": 4096,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.3,
            "top_p": 0.9,
        }

        response = bedrock_client.invoke_model(
            body=json.dumps(request_payload),
            modelId='anthropic.claude-3-sonnet-20240229-v1:0',
            accept='application/json',
            contentType='application/json'
        )

        # Response body is a stream of JSON; the reply text lives in the
        # first content block.
        parsed = json.loads(response.get('body').read())
        return parsed['content'][0]['text']
    except Exception as e:
        logger.error(f"Error in call_bedrock: {str(e)}")
        return f"Error: {str(e)}"

def call_hf_inference(prompt):
    """Simulate LLM output for demo purposes when no API credentials are available"""
    # This function generates a realistic but fake response for demo purposes
    # In a real deployment, you would call an actual LLM API
    
    random_seed = sum(ord(c) for c in prompt) % 1000  # Generate a seed based on prompt
    np.random.seed(random_seed)
    
    # Simulate speech factors with random but reasonable values
    factors = [
        "Difficulty producing fluent speech",
        "Word retrieval issues",
        "Grammatical errors",
        "Repetitions and revisions",
        "Neologisms",
        "Perseveration",
        "Comprehension issues"
    ]
    
    occurrences = np.random.randint(1, 15, size=len(factors))
    percentiles = np.random.randint(30, 95, size=len(factors))
    
    # Simulate CASL scores
    domains = ["Lexical/Semantic", "Syntactic", "Supralinguistic"]
    scores = np.random.randint(80, 115, size=3)
    percentiles_casl = [int(np.interp(s, [70, 85, 100, 115, 130], [2, 16, 50, 84, 98])) for s in scores]
    
    perf_levels = []
    for s in scores:
        if s < 70: perf_levels.append("Well Below Average")
        elif s < 85: perf_levels.append("Below Average")
        elif s < 115: perf_levels.append("Average")
        elif s < 130: perf_levels.append("Above Average")
        else: perf_levels.append("Well Above Average")
    
    # Build response
    response = "## Speech Factor Analysis\n\n"
    for i, factor in enumerate(factors):
        response += f"{factor}: {occurrences[i]}, {percentiles[i]}\n"
    
    response += "\n## CASL-2 Assessment\n\n"
    for i, domain in enumerate(domains):
        response += f"{domain} Skills: Standard Score ({scores[i]}), Percentile Rank ({percentiles_casl[i]}%), Performance Level ({perf_levels[i]})\n"
    
    response += "\n## Other analysis/Best plans of action:\n\n"
    suggestions = [
        "Implement word-finding strategies with semantic cuing",
        "Practice structured narrative tasks with visual supports",
        "Use sentence formulation exercises with increasing complexity",
        "Incorporate self-monitoring techniques during structured conversations",
        "Work on grammatical forms through structured practice"
    ]
    for suggestion in suggestions:
        response += f"- {suggestion}\n"
    
    response += "\n## Explanation:\n\n"
    response += "Based on the analysis, this patient demonstrates moderate word-finding difficulties with compensatory strategies like filler words and repetitions. Their syntactic skills show some weakness in verb tense consistency. Treatment should focus on building vocabulary access, grammatical accuracy, and narrative structure using scaffolded support.\n"
    
    response += "\n## Additional Analysis:\n\n"
    response += "The patient shows relative strengths in conversation maintenance and topic coherence. Consider building on these strengths while addressing specific language formulation challenges. Recommended frequency: 2-3 sessions per week for 10-12 weeks with periodic reassessment."
    
    return response

def parse_analysis_response(response):
    """Parse the LLM's markdown analysis into structured data.

    Scans the response line by line, extracting three kinds of content:
    factor lines ("<name>: <count>, <percentile>"), CASL-2 domain lines
    ("<domain> Skills: Standard Score (..), Percentile Rank (..%),
    Performance Level (..)"), and free-text sections whose start is marked
    by a header ("Other analysis...", "Explanation:", "Additional
    Analysis:").

    Returns:
        dict with keys 'speech_factors' (DataFrame: Factor, Occurrences,
        Severity), 'casl_data' (DataFrame with one row per CASL domain),
        'treatment_suggestions' (list of str bullet items), 'explanation'
        (str) and 'additional_analysis' (str).
    """
    lines = response.split('\n')
    data = {
        'Factor': [],
        'Occurrences': [],
        'Severity': []
    }
    
    # The three CASL rows are fixed; scores default to 0 / empty level when
    # the corresponding domain line is absent from the response.
    casl_data = {
        'Domain': ['Lexical/Semantic', 'Syntactic', 'Supralinguistic'],
        'Standard Score': [0, 0, 0],
        'Percentile': [0, 0, 0],
        'Performance Level': ['', '', '']
    }
    
    treatment_suggestions = []
    explanation = ""
    additional_analysis = ""
    
    # Pattern to match factor lines, e.g. "Word retrieval issues: 5, 72"
    factor_pattern = re.compile(r'([\w\s/]+):\s*(\d+)[,\s]+(\d+)')
    
    # Pattern to match CASL data lines (format produced by the prompt /
    # demo generator)
    casl_pattern = re.compile(r'(\w+/?\w*)\s+Skills:\s+Standard\s+Score\s+\((\d+)\),\s+Percentile\s+Rank\s+\((\d+)%\),\s+Performance\s+Level\s+\(([\w\s]+)\)')
    
    # Section-state flags: at most one is True at any time; headers below
    # flip them on/off.
    in_suggestions = False
    in_explanation = False
    in_additional = False
    
    for line in lines:
        line = line.strip()
        
        # Skip empty lines
        if not line:
            continue
            
        # Check for factor data first. A CASL line never matches
        # factor_pattern because its colon is followed by "Standard", not
        # digits, so the ordering of these two checks is safe.
        factor_match = factor_pattern.search(line)
        if factor_match:
            factor = factor_match.group(1).strip()
            occurrences = int(factor_match.group(2))
            severity = int(factor_match.group(3))
            
            data['Factor'].append(factor)
            data['Occurrences'].append(occurrences)
            data['Severity'].append(severity)
            continue
            
        # Check for CASL data; route the captured values to the fixed row
        # for the matched domain.
        casl_match = casl_pattern.search(line)
        if casl_match:
            domain = casl_match.group(1)
            score = int(casl_match.group(2))
            percentile = int(casl_match.group(3))
            level = casl_match.group(4)
            
            if "Lexical" in domain:
                casl_data['Standard Score'][0] = score
                casl_data['Percentile'][0] = percentile
                casl_data['Performance Level'][0] = level
            elif "Syntactic" in domain:
                casl_data['Standard Score'][1] = score
                casl_data['Percentile'][1] = percentile
                casl_data['Performance Level'][1] = level
            elif "Supralinguistic" in domain:
                casl_data['Standard Score'][2] = score
                casl_data['Percentile'][2] = percentile
                casl_data['Performance Level'][2] = level
            continue
        
        # Check for section headers; each header line switches the active
        # section and is itself not recorded as content.
        if "Other analysis/Best plans of action:" in line or "### Recommended Treatment Approaches" in line:
            in_suggestions = True
            in_explanation = False
            in_additional = False
            continue
        elif "Explanation:" in line or "### Clinical Rationale" in line:
            in_suggestions = False
            in_explanation = True
            in_additional = False
            continue
        elif "Additional Analysis:" in line:
            in_suggestions = False
            in_explanation = False
            in_additional = True
            continue
            
        # Add content to the currently active section. Suggestions only
        # collect bulleted lines; the other sections collect everything.
        if in_suggestions and line.startswith("- "):
            treatment_suggestions.append(line[2:])  # Remove the bullet point
        elif in_explanation:
            explanation += line + "\n"
        elif in_additional:
            additional_analysis += line + "\n"
    
    return {
        'speech_factors': pd.DataFrame(data),
        'casl_data': pd.DataFrame(casl_data),
        'treatment_suggestions': treatment_suggestions,
        'explanation': explanation,
        'additional_analysis': additional_analysis
    }

def create_plots(speech_factors, casl_data):
    """Create visualizations for the analysis results.

    Draws two side-by-side panels: a horizontal bar chart of speech-factor
    occurrence counts (left; skipped when speech_factors is empty) and a
    bar chart of CASL-2 standard scores with reference lines at 85/100/115
    (right).

    Args:
        speech_factors: DataFrame with 'Factor' and 'Occurrences' columns;
            may be empty.
        casl_data: DataFrame with 'Domain' and 'Standard Score' columns.

    Returns:
        io.BytesIO buffer containing the rendered PNG, rewound to byte 0.
    """
    # 'seaborn-v0_8-pastel' only exists on matplotlib >= 3.6 (earlier
    # releases shipped it as 'seaborn-pastel'); fall back to the default
    # style instead of letting a missing style name crash the analysis.
    try:
        plt.style.use('seaborn-v0_8-pastel')
    except (OSError, ValueError):
        logger.warning("Plot style 'seaborn-v0_8-pastel' unavailable; using default style")
    
    # Create figure with two subplots
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6), dpi=100)
    
    # Plot speech factors - sorted by occurrence count
    if not speech_factors.empty:
        # Sort the dataframe so the most frequent factor is on top
        speech_factors_sorted = speech_factors.sort_values('Occurrences', ascending=False)
        
        # Custom colors (cycled if there are more factors than colors)
        speech_colors = ['#4C72B0', '#55A868', '#C44E52', '#8172B3', '#CCB974', '#64B5CD', '#4C72B0']
        
        # Create horizontal bar chart
        bars = ax1.barh(speech_factors_sorted['Factor'], 
               speech_factors_sorted['Occurrences'], 
               color=speech_colors[:len(speech_factors_sorted)])
        
        # Add count labels at the end of each bar
        for bar in bars:
            width = bar.get_width()
            ax1.text(width + 0.3, bar.get_y() + bar.get_height()/2, 
                    f'{width:.0f}', ha='left', va='center')
        
        ax1.set_title('Speech Factors Analysis', fontsize=14, fontweight='bold')
        ax1.set_xlabel('Number of Occurrences', fontsize=11)
        # No y-label needed for horizontal bar chart
        
        # Remove top and right spines
        ax1.spines['top'].set_visible(False)
        ax1.spines['right'].set_visible(False)
    
    # Plot CASL domains
    domain_names = casl_data['Domain']
    y_scores = casl_data['Standard Score']
    
    # Custom color scheme
    casl_colors = ['#4C72B0', '#55A868', '#C44E52']
    
    # Create bars with nice colors
    bars = ax2.bar(domain_names, y_scores, color=casl_colors)
    
    # Add score labels on top of each bar
    for bar in bars:
        height = bar.get_height()
        ax2.text(bar.get_x() + bar.get_width()/2., height + 1,
                f'{height:.0f}', ha='center', va='bottom')
    
    # Add score reference lines for the standard-score bands
    ax2.axhline(y=100, linestyle='--', color='gray', alpha=0.7, label='Average (100)')
    ax2.axhline(y=85, linestyle=':', color='orange', alpha=0.7, label='Below Average (<85)')
    ax2.axhline(y=115, linestyle=':', color='green', alpha=0.7, label='Above Average (>115)')
    
    # Add labels and title
    ax2.set_title('CASL-2 Standard Scores', fontsize=14, fontweight='bold')
    ax2.set_ylabel('Standard Score', fontsize=11)
    ax2.set_ylim(bottom=0, top=max(130, max(y_scores) + 15))  # Set y-axis limit with some padding
    
    # Add legend
    ax2.legend(loc='upper right', fontsize='small')
    
    # Remove top and right spines
    ax2.spines['top'].set_visible(False)
    ax2.spines['right'].set_visible(False)
    
    plt.tight_layout()
    
    # Serialize the figure to an in-memory PNG and close it to free memory
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight')
    buf.seek(0)
    plt.close()
    
    return buf

def create_radar_chart(speech_factors):
    """Create a radar chart for speech factors (percentiles)"""

    # Without factor/severity data there is nothing to plot: render a
    # placeholder image instead.
    if speech_factors.empty or 'Severity' not in speech_factors.columns:
        plt.figure(figsize=(8, 8))
        plt.text(0.5, 0.5, "No data available for radar chart", 
                ha='center', va='center', fontsize=14)
        plt.axis('off')

        placeholder = io.BytesIO()
        plt.savefig(placeholder, format='png')
        placeholder.seek(0)
        plt.close()
        return placeholder

    # Close the polygon by appending the first point to the end of both
    # the label and value series.
    labels = speech_factors['Factor'].tolist()
    values = speech_factors['Severity'].tolist()
    labels.append(labels[0])
    values.append(values[0])

    # One spoke per category (the appended closing point is not a spoke),
    # evenly spaced around the circle.
    spoke_count = len(labels) - 1
    angles = [idx / float(spoke_count) * 2 * np.pi for idx in range(spoke_count)]
    angles.append(angles[0])

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111, polar=True)

    # Category labels around the rim; percentile rings from 20 to 100.
    plt.xticks(angles[:-1], labels[:-1], size=12)
    ax.set_rlabel_position(0)
    plt.yticks([20, 40, 60, 80, 100], ["20", "40", "60", "80", "100"], color="grey", size=10)
    plt.ylim(0, 100)

    # Outline and fill of the severity polygon.
    ax.plot(angles, values, linewidth=1, linestyle='solid', color='#4C72B0')
    ax.fill(angles, values, color='#4C72B0', alpha=0.25)

    plt.title('Speech Factors Severity (Percentile)', size=15, fontweight='bold', pad=20)

    # Serialize to an in-memory PNG and close the figure to free memory.
    out = io.BytesIO()
    plt.savefig(out, format='png', bbox_inches='tight')
    out.seek(0)
    plt.close()

    return out

def analyze_transcript(transcript, age, gender):
    """Analyze a speech transcript using the Bedrock API or fallback to demo mode.

    Args:
        transcript: CHAT-formatted speech sample text (*PAR: lines).
        age: Patient age, interpolated into the prompt.
        gender: Patient gender string, interpolated into the prompt.

    Returns:
        Tuple of (parsed results dict from parse_analysis_response, PNG
        buffer from create_plots, PNG buffer from create_radar_chart, raw
        LLM response text).
    """
    
    # Instructions for the LLM analysis
    instructions = """
    You're a professional Speech-Language Pathologist analyzing this transcription sample.
    
    For your analysis, count occurrences of:
    
    1. Difficulty producing fluent, grammatical speech - Speech that is slow, halting, with pauses while searching for words
    2. Word retrieval issues - Trouble finding specific words, using fillers like "um", circumlocution, or semantically similar substitutions
    3. Grammatical errors - Missing/incorrect function words, verb tense problems, simplified sentences
    4. Repetitions and revisions - Repeating or restating due to word-finding or sentence construction difficulties
    5. Neologisms - Creating nonexistent "new" words
    6. Perseveration - Unintentionally repeating words or phrases
    7. Comprehension issues - Difficulty understanding complex sentences or fast speech
    
    Analyze using the CASL-2 (Comprehensive Assessment of Spoken Language) framework:
    
    Lexical/Semantic Skills:
    - Evaluate vocabulary diversity, word retrieval difficulties, and semantic precision
    - Estimate Standard Score (mean=100, SD=15), percentile rank, and performance level
    
    Syntactic Skills:
    - Assess sentence structure, grammatical accuracy, and syntactic complexity
    - Estimate Standard Score, percentile rank, and performance level
    
    Supralinguistic Skills:
    - Evaluate figurative language use, inferencing, and contextual understanding
    - Estimate Standard Score, percentile rank, and performance level
    
    Format your analysis with:
    1. Speech factor counts with severity percentiles
    2. CASL-2 domain scores with performance levels
    3. Treatment recommendations based on findings
    4. Brief explanation of your rationale
    5. Any additional insights
    """
    
    # Prepare prompt for Claude. The output format requested here must stay
    # in sync with the regexes in parse_analysis_response.
    prompt = f"""
    You are an experienced Speech-Language Pathologist analyzing this transcript for a patient who is {age} years old and {gender}.
    
    TRANSCRIPT:
    {transcript}
    
    {instructions}
    
    Be precise, professional, and empathetic in your analysis. Focus on the linguistic patterns present in the sample.
    """
    
    # Call the real API when the client was initialized at import time;
    # otherwise fall back to the deterministic demo generator.
    if bedrock_client:
        response = call_bedrock(prompt)
    else:
        response = call_hf_inference(prompt)
    
    # Parse the response into DataFrames and text sections
    results = parse_analysis_response(response)
    
    # Create visualizations from the parsed data
    plot_image = create_plots(results['speech_factors'], results['casl_data'])
    radar_image = create_radar_chart(results['speech_factors'])
    
    return results, plot_image, radar_image, response

def create_interface():
    """Create the Gradio interface"""
    
    # Define custom theme colors
    primary_color = "#2C7FB8"  # Professional blue
    secondary_color = "#f5f7fa"  # Light background
    accent_color = "#78909C"  # Gray-blue accent
    
    custom_theme = gr.themes.Soft(
        primary_hue=primary_color,
        secondary_hue=secondary_color,
        neutral_hue=accent_color,
        font=[gr.themes.GoogleFont("Inter"), "system-ui", "sans-serif"]
    ).set(
        body_background_fill=secondary_color,
        button_primary_background_fill=primary_color,
        button_primary_background_fill_hover=primary_color,
        button_primary_text_color="white",
        block_title_text_color=primary_color,
        block_label_text_color=accent_color,
        input_background_fill="#FFFFFF",
    )
    
    with gr.Blocks(theme=custom_theme, css="""
        .header {
            text-align: center;
            margin-bottom: 20px;
        }
        .header img {
            max-height: 100px;
            margin-bottom: 10px;
        }
        .container {
            border-radius: 10px;
            padding: 10px;
            margin-bottom: 20px;
        }
        .patient-info {
            background-color: #e3f2fd;
        }
        .speech-sample {
            background-color: #f0f8ff;
        }
        .results-container {
            background-color: #f9f9f9; 
        }
        .viz-container {
            display: flex;
            justify-content: center;
            margin-bottom: 20px;
        }
        .footer {
            text-align: center;
            margin-top: 30px;
            padding: 10px;
            font-size: 0.8em;
            color: #78909C;
        }
        .info-box {
            background-color: #e8f5e9;
            border-left: 4px solid #4CAF50;
            padding: 10px 15px;
            margin-bottom: 15px;
            border-radius: 4px;
        }
        .warning-box {
            background-color: #fff8e1;
            border-left: 4px solid #FFC107;
            padding: 10px 15px;
            border-radius: 4px;
        }
        .markdown-text h3 {
            color: #2C7FB8;
            border-bottom: 1px solid #eaeaea;
            padding-bottom: 5px;
        }
        .evidence-table {
            border-collapse: collapse;
            width: 100%;
        }
        .evidence-table th, .evidence-table td {
            border: 1px solid #ddd;
            padding: 8px;
            text-align: left;
        }
        .evidence-table th {
            background-color: #f5f7fa;
            color: #333;
        }
        .evidence-table tr:nth-child(even) {
            background-color: #f9f9f9;
        }
        .tab-content {
            padding: 15px;
            background-color: white;
            border-radius: 0 0 8px 8px;
            box-shadow: 0 2px 5px rgba(0,0,0,0.05);
        }
    """) as app:
        # Create header with logo
        gr.HTML(
            """
            <div class="header">
                <h1>CASL Speech Analysis Tool</h1>
                <p>A professional assessment tool for Speech-Language Pathologists</p>
            </div>
            """
        )
        
        # Create main layout
        with gr.Row():
            # Left column - Input section
            with gr.Column(scale=1):
                with gr.Box(elem_classes="container patient-info"):
                    gr.Markdown("### Patient Information")
                    
                    with gr.Row():
                        patient_name = gr.Textbox(label="Patient Name", placeholder="Enter patient name")
                        record_id = gr.Textbox(label="Record ID", placeholder="Enter record ID")
                    
                    with gr.Row():
                        age = gr.Number(label="Age", value=8, minimum=1, maximum=120)
                        gender = gr.Radio(["male", "female", "other"], label="Gender", value="male")
                    
                    with gr.Row():
                        assessment_date = gr.Textbox(label="Assessment Date", 
                                           placeholder="MM/DD/YYYY", 
                                           value=None)
                        clinician_name = gr.Textbox(label="Clinician", 
                                          placeholder="Enter clinician name")
                
                with gr.Box(elem_classes="container speech-sample"):
                    gr.Markdown("### Speech Sample")
                    
                    # Add sample button
                    sample_btn = gr.Button("Load Sample Transcript", size="sm")
                    
                    # Transcript input
                    transcript = gr.Textbox(
                        label="Transcript", 
                        placeholder="Paste the speech transcript here...",
                        lines=10
                    )
                    
                    # Add info about transcript format
                    gr.Markdown(
                        """
                        <div class="info-box">
                            <strong>Transcript Format:</strong> Use CHAT format with *PAR: for patient lines. 
                            Mark word-finding with &-um, paraphasias with [*], and provide intended words with [: word].
                        </div>
                        """,
                        elem_classes="markdown-text"
                    )
                    
                    # File upload
                    file_upload = gr.File(
                        label="Or upload a transcript file", 
                        file_types=["text", "txt", "pdf", "rtf"]
                    )
                    
                    # Analysis button
                    analyze_btn = gr.Button("Analyze Speech Sample", variant="primary", size="lg")
                
                # Add API credential section (collapsible)
                with gr.Accordion("API Configuration", open=False):
                    gr.Markdown("""
                    ### AWS Bedrock Credentials
                    
                    For full functionality, add your AWS credentials as environment variables or secrets in your HuggingFace Space:
                    - AWS_ACCESS_KEY
                    - AWS_SECRET_KEY
                    - AWS_REGION (default: us-east-1)
                    
                    Without credentials, the app will run in demo mode with simulated responses.
                    """)
            
            # Right column - Results section
            # NOTE: components assigned to names here (output_image, radar_chart,
            # speech_factors_table, casl_table, treatment_md, explanation_md,
            # full_analysis, export_btn, export_status) are wired to callbacks
            # later in this function.
            with gr.Column(scale=1):
                with gr.Box(elem_classes="container results-container"):
                    with gr.Tabs() as tabs:
                        # Summary tab
                        with gr.TabItem("Summary", id=0, elem_classes="tab-content"):
                            with gr.Row():
                                output_image = gr.Image(label="Speech Factors & CASL-2 Scores", 
                                                     show_label=True, elem_classes="viz-container")
                            
                            with gr.Row():
                                radar_chart = gr.Image(label="Severity Profile", 
                                                     show_label=True, elem_classes="viz-container")
                                
                            with gr.Box():
                                gr.Markdown("### Key Findings", elem_classes="markdown-text")
                                speech_factors_table = gr.DataFrame(label="Speech Factors Analysis", 
                                                                  headers=["Factor", "Occurrences", "Severity (Percentile)"],
                                                                  interactive=False)
                                casl_table = gr.DataFrame(label="CASL-2 Assessment", 
                                                        headers=["Domain", "Standard Score", "Percentile", "Performance Level"],
                                                        interactive=False)
                        
                        # Treatment tab
                        with gr.TabItem("Treatment Plan", id=1, elem_classes="tab-content"):
                            gr.Markdown("### Recommended Treatment Approaches", elem_classes="markdown-text")
                            treatment_md = gr.Markdown(elem_classes="treatment-panel")
                            
                            gr.Markdown("### Clinical Rationale", elem_classes="markdown-text")
                            explanation_md = gr.Markdown(elem_classes="panel")
                            
                            # Static evidence table rendered as raw HTML inside markdown.
                            with gr.Accordion("Supporting Evidence", open=False):
                                gr.Markdown("""
                                <table class="evidence-table">
                                  <tr>
                                    <th>Factor</th>
                                    <th>Evidence-based Approaches</th>
                                    <th>References</th>
                                  </tr>
                                  <tr>
                                    <td>Word Retrieval</td>
                                    <td>Semantic feature analysis, phonological cueing, word generation tasks</td>
                                    <td>Boyle, 2010; Kiran & Thompson, 2003</td>
                                  </tr>
                                  <tr>
                                    <td>Grammatical Errors</td>
                                    <td>Treatment of Underlying Forms (TUF), Morphosyntactic therapy</td>
                                    <td>Thompson et al., 2003; Ebbels, 2014</td>
                                  </tr>
                                  <tr>
                                    <td>Fluency/Prosody</td>
                                    <td>Rate control, rhythmic cueing, contrastive stress exercises</td>
                                    <td>Ballard et al., 2010; Tamplin & Baker, 2017</td>
                                  </tr>
                                </table>
                                """, elem_classes="markdown-text")
                        
                        # Evidence tab 
                        with gr.TabItem("Language Sample Evidence", id=2, elem_classes="tab-content"):
                            gr.Markdown("### Speech Sample Evidence", elem_classes="markdown-text")
                            
                            # Create a collapsible section for each speech factor
                            # (placeholder text only; these accordions are not among
                            # the outputs updated by the analyze callback)
                            factors = ["Word Retrieval", "Grammatical Errors", "Repetitions/Revisions", 
                                       "Fluency", "Neologisms", "Perseveration"]
                            
                            for factor in factors:
                                with gr.Accordion(f"{factor} Examples", open=False):
                                    gr.Markdown(f"Examples of {factor.lower()} will be highlighted here from the transcript.")
                            
                            gr.Markdown("### Transcript Annotations", elem_classes="markdown-text")
                            gr.Markdown("A detailed analysis of the transcript will appear here after processing.")
                            
                        # Full report tab
                        with gr.TabItem("Full Report", id=3, elem_classes="tab-content"):
                            full_analysis = gr.Markdown()
                            
                            # Add PDF export option (simulated; handled by export_pdf)
                            export_btn = gr.Button("Export Report as PDF", variant="secondary")
                            export_status = gr.Markdown("")
        
        # Footer: static professional-use disclaimer shown below both columns.
        gr.HTML(
            """
            <div class="footer">
                <p>CASL Speech Analysis Tool | For professional use by Speech-Language Pathologists</p>
                <p>Results should be interpreted by qualified professionals in conjunction with other assessment methods.</p>
            </div>
            """
        )
        
        # Define app functions
        
        # Function to load sample transcript
        def load_sample():
            """Return the bundled sample transcript (fills the transcript box)."""
            return SAMPLE_TRANSCRIPT
        
        # Handle file upload
        def process_upload(file):
            """Extract text from an uploaded transcript file.

            Args:
                file: Gradio upload object exposing the temp-file path via
                    ``.name``, or None when nothing was uploaded.

            Returns:
                The file's text content ("" when file is None). PDFs are
                routed through read_pdf(); any other file is read as UTF-8
                text, silently replacing undecodable bytes.
            """
            if file is None:
                return ""

            file_path = file.name
            # Case-insensitive check so '.PDF'/'.Pdf' uploads are also
            # treated as PDFs instead of being read as raw text.
            if file_path.lower().endswith('.pdf'):
                return read_pdf(file_path)
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                return f.read()
        
        # Handle analysis button click
        def on_analyze_click(transcript_text, age_val, gender_val, patient_name_val, record_id_val, clinician_val, assessment_date_val):
            """Run the transcript analysis and format every UI output slot.

            Args:
                transcript_text: Raw speech sample to analyze.
                age_val: Patient age in years (passed to analyze_transcript).
                gender_val: Patient gender (passed to analyze_transcript).
                patient_name_val, record_id_val, clinician_val, assessment_date_val:
                    Optional metadata rendered into the full-report header.

            Returns:
                A 7-tuple matching analyze_btn's outputs order:
                (speech_factors_df, casl_df, plot_image, radar_image,
                 treatment_markdown, explanation_markdown, full_report_markdown).
                On validation failure or any exception, the DataFrames are
                empty, the images are None, and the text slots carry error
                messages.
            """
            # Guard: refuse missing or too-short transcripts before doing any work.
            if not transcript_text or len(transcript_text.strip()) < 50:
                return (
                    pd.DataFrame(), 
                    pd.DataFrame(), 
                    None,
                    None,
                    "Error: Please provide a longer transcript for analysis.",
                    "The transcript is too short for meaningful analysis.",
                    "Please provide a speech sample with at least 50 characters."
                )

            try:
                results, plot_img, radar_img, full_text = analyze_transcript(transcript_text, age_val, gender_val)

                # Render treatment suggestions as a markdown bullet list.
                treatment_text = "".join(
                    f"- {suggestion}\n" for suggestion in results['treatment_suggestions']
                )

                # Build the patient-metadata header, skipping empty fields.
                metadata_fields = [
                    ("Patient", patient_name_val),
                    ("Record ID", record_id_val),
                    ("Age", f"{age_val} years" if age_val else ""),
                    ("Gender", gender_val),
                    ("Assessment Date", assessment_date_val),
                    ("Clinician", clinician_val),
                ]
                patient_info = "".join(
                    f"**{label}:** {value}\n" for label, value in metadata_fields if value
                )

                if patient_info:
                    full_report = f"## Patient Information\n\n{patient_info}\n\n## Analysis Report\n\n{full_text}"
                else:
                    full_report = f"## Complete Analysis Report\n\n{full_text}"

                # analyze_transcript returns image buffers; Gradio components
                # need PIL images.
                plot_img_pil = Image.open(plot_img)
                radar_img_pil = Image.open(radar_img)

                return (
                    results['speech_factors'],
                    results['casl_data'],
                    plot_img_pil,
                    radar_img_pil,
                    treatment_text,
                    results['explanation'],
                    full_report
                )
            except Exception as e:
                # Surface the failure in every text slot so the UI shows context
                # instead of silently staying blank.
                logger.exception("Error during analysis")
                return (
                    pd.DataFrame(), 
                    pd.DataFrame(), 
                    None,
                    None,
                    f"Error during analysis: {str(e)}",
                    "An error occurred while processing the transcript.",
                    f"Error details: {str(e)}"
                )
        
        # Function to simulate PDF export
        def export_pdf():
            """Simulated PDF export: returns a status string only.

            No PDF is generated in this demo; a production build would render
            the report and trigger a download here instead.
            """
            status = (
                "Report export initiated. The PDF would be downloaded "
                "in a production environment."
            )
            return status
        
        # Connect UI components to their callbacks.
        sample_btn.click(load_sample, outputs=[transcript])
        file_upload.upload(process_upload, file_upload, transcript)
        export_btn.click(export_pdf, outputs=[export_status])
        
        # The outputs list order below must match the 7-tuple returned by
        # on_analyze_click.
        analyze_btn.click(
            on_analyze_click,
            inputs=[
                transcript, age, gender, 
                patient_name, record_id, clinician_name, assessment_date
            ],
            outputs=[
                speech_factors_table,
                casl_table,
                output_image,
                radar_chart, 
                treatment_md,
                explanation_md,
                full_analysis
            ]
        )
    
    return app

# Create requirements.txt file for HuggingFace Spaces
def create_requirements_file(path="requirements.txt"):
    """Write the dependency list that HuggingFace Spaces installs from.

    Args:
        path: Destination file path. Defaults to "requirements.txt" in the
            current working directory (where Spaces expects it). The
            parameter keeps the function testable without touching the CWD.
    """
    requirements = [
        "gradio>=4.0.0",
        "pandas",
        "matplotlib",
        "numpy",
        "Pillow",
        "PyPDF2",
        "boto3",
    ]

    # One requirement per line, with a trailing newline after each.
    with open(path, "w") as f:
        f.write("".join(f"{req}\n" for req in requirements))

# Create and launch the interface
if __name__ == "__main__":
    # Regenerate requirements.txt so HuggingFace Spaces can install deps.
    create_requirements_file()

    # Missing AWS credentials is not fatal: the app falls back to demo mode.
    if not (AWS_ACCESS_KEY and AWS_SECRET_KEY):
        print("NOTE: AWS credentials not found. The app will run in demo mode with simulated responses.")
        print("To enable full functionality, set AWS_ACCESS_KEY and AWS_SECRET_KEY environment variables.")

    # Build the Gradio UI and start serving it.
    app = create_interface()
    app.launch()