"""
Marketing Image Generator with Gradio MCP Server
Professional AI image generation using Google Imagen4 with marketing review
Deployed on HuggingFace Spaces with built-in MCP server support
"""

import gradio as gr
import os
import logging
import json
import base64
import asyncio
from typing import Dict, Any, Tuple, List
from PIL import Image
import io

# Google Service Account Authentication Setup
def setup_google_credentials():
    """Setup Google credentials from service account JSON"""
    try:
        service_account_json = os.getenv("GOOGLE_SERVICE_ACCOUNT_JSON")
        if service_account_json:
            import tempfile
            from google.oauth2 import service_account
            
            # Clean and parse the JSON credentials
            # Remove common problematic characters
            cleaned_json = service_account_json.strip()
            # Replace common escape sequences
            cleaned_json = cleaned_json.replace('\\n', '\n').replace('\\t', '\t').replace('\\r', '\r')
            
            credentials_dict = json.loads(cleaned_json)
            
            # Create credentials from service account info
            credentials = service_account.Credentials.from_service_account_info(credentials_dict)
            
            # Set the credentials in environment
            with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
                json.dump(credentials_dict, f)
                os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = f.name
            
            print("βœ… Google Cloud service account configured")
            return True
    except Exception as e:
        print(f"⚠️ Google Cloud service account setup failed: {e}")
    
    print("⚠️ Google Cloud service account not found")
    return False

# Setup Google credentials on startup
setup_google_credentials()
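# Illustrative (hypothetical) shape of the GOOGLE_SERVICE_ACCOUNT_JSON secret consumed above -
# the standard service-account key file pasted as a single secret value:
#   {"type": "service_account", "project_id": "my-project",
#    "private_key": "-----BEGIN PRIVATE KEY-----\n...",
#    "client_email": "sa-name@my-project.iam.gserviceaccount.com", ...}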

# Google AI imports
try:
    import google.generativeai as genai
    from google import genai as genai_sdk
    GEMINI_AVAILABLE = True
except ImportError:
    GEMINI_AVAILABLE = False

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Get API keys - prioritise HuggingFace Secrets
GCP_KEYS = [
    # Hugging Face Secrets (these are the primary ones for HF deployment)
    os.getenv("GOOGLE_API_KEY"),
    os.getenv("GEMINI_API_KEY"), 
    os.getenv("GCP_API_KEY"),
    # Local development keys (fallback for local testing)
    os.getenv("GCP_KEY_1"),
    os.getenv("GCP_KEY_2"), 
    os.getenv("GCP_KEY_3"),
    os.getenv("GCP_KEY_4"),
    os.getenv("GCP_KEY_5"),
    os.getenv("GCP_KEY_6")
]

GOOGLE_API_KEY = next((key for key in GCP_KEYS if key), None)
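# Example local setup (hypothetical value) - any one of the variables above is enough, e.g.:
#   export GOOGLE_API_KEY="your-gemini-api-key"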

if GOOGLE_API_KEY and GEMINI_AVAILABLE:
    genai.configure(api_key=GOOGLE_API_KEY)
    logger.info("βœ… Google AI configured successfully")
    logger.info(f"Key source: {[key for key in ['GOOGLE_API_KEY', 'GEMINI_API_KEY', 'GCP_API_KEY'] if os.getenv(key)]}")
else:
    logger.warning(f"❌ Google AI NOT configured - GEMINI_AVAILABLE: {GEMINI_AVAILABLE}, GOOGLE_API_KEY: {'present' if GOOGLE_API_KEY else 'missing'}")

# MCP-enabled functions for Agent1 (Image Generator)
def enhance_prompt_with_gemini(prompt: str, style: str) -> str:
    """
    Use Gemini to enhance the user's prompt for better image generation.
    
    Args:
        prompt (str): The original marketing prompt
        style (str): The desired image style
        
    Returns:
        str: Enhanced prompt optimised for image generation
    """
    if not GEMINI_AVAILABLE or not GOOGLE_API_KEY:
        # Basic enhancement without Gemini
        style_enhancers = {
            "realistic": "photorealistic, high detail, professional photography, sharp focus",
            "artistic": "artistic masterpiece, creative composition, painterly style",
            "cartoon": "cartoon style, vibrant colours, playful, animated character design",
            "photographic": "professional photograph, high quality, detailed, commercial photography",
            "illustration": "digital illustration, clean vector art, modern design"
        }
        enhancer = style_enhancers.get(style.lower(), "high quality, detailed")
        return f"{prompt}, {enhancer}"
    
    try:
        enhancement_prompt = f"""
        You are an expert prompt engineer for AI image generation. Take this marketing prompt and enhance it for optimal results.

        Original prompt: "{prompt}"
        Desired style: "{style}"

        Please provide an enhanced version that:
        1. Maintains the core marketing intent
        2. Adds specific technical details for better image quality
        3. Includes appropriate style descriptors for "{style}" style
        4. Adds professional marketing composition guidance
        5. Keeps the enhanced prompt under 150 words

        Return only the enhanced prompt without explanation.
        """
        
        model = genai.GenerativeModel('gemini-2.5-pro')
        response = model.generate_content(enhancement_prompt)
        enhanced = response.text.strip()
        
        logger.info(f"Gemini enhanced prompt: {enhanced}")
        return enhanced
        
    except Exception as e:
        logger.warning(f"Failed to enhance prompt with Gemini: {e}")
        style_enhancers = {
            "realistic": "photorealistic, high detail, professional photography",
            "artistic": "artistic masterpiece, creative composition",
            "cartoon": "cartoon style, vibrant colours, playful",
            "photographic": "professional photograph, high quality, detailed",
            "illustration": "digital illustration, clean design"
        }
        enhancer = style_enhancers.get(style.lower(), "high quality")
        return f"{prompt}, {enhancer}"

def generate_marketing_image(prompt: str, style: str = "realistic") -> str:
    """
    Generate a professional marketing image using Google Imagen4.
    
    Args:
        prompt (str): Description of the marketing image to generate
        style (str): Art style for the image (realistic, artistic, cartoon, photographic, illustration)
        
    Returns:
        str: JSON string containing image data and metadata
    """
    logger.info(f"🎨 Generating marketing image: {prompt}")
    
    try:
        # Enhance the prompt
        enhanced_prompt = enhance_prompt_with_gemini(prompt, style)
        
        # Try to generate with Google Genai SDK
        if GEMINI_AVAILABLE and GOOGLE_API_KEY:
            try:
                logger.info("🎨 Using Google Genai SDK for image generation")
                logger.info(f"API Key available: {GOOGLE_API_KEY[:10]}...")
                
                # Initialise the genai SDK client  
                client = genai_sdk.Client(api_key=GOOGLE_API_KEY)
                
                # Generate image using Imagen 4.0 with optimised safety filtering
                # Safety configuration: "block_low_and_above" - allows corporate/business content
                # while maintaining essential safety guardrails. This setting significantly
                # improves generation success for financial institutions, corporate brands,
                # and marketing content while blocking genuinely harmful content.
                result = client.models.generate_images(
                    model="imagen-4.0-generate-preview-06-06",
                    prompt=enhanced_prompt,
                    config={
                        "number_of_images": 1,
                        "output_mime_type": "image/png",
                        "safety_filter_level": "block_low_and_above",  # Reduced from default strict filtering
                        "include_safety_attributes": False  # Cleaner response without safety metadata
                    }
                )
                
                # Check if we got a valid response with images
                if result and hasattr(result, 'generated_images') and len(result.generated_images) > 0:
                    generated_image = result.generated_images[0]
                    
                    if hasattr(generated_image, 'image') and hasattr(generated_image.image, 'image_bytes'):
                        # Convert image bytes to base64 data URL
                        image_bytes = generated_image.image.image_bytes
                        img_base64 = base64.b64encode(image_bytes).decode('utf-8')
                        
                        # Determine MIME type from the response or default to PNG
                        mime_type = getattr(generated_image.image, 'mime_type', 'image/png')
                        image_url = f"data:{mime_type};base64,{img_base64}"
                        
                        response_data = {
                            "success": True,
                            "image_url": image_url,
                            "prompt": prompt,
                            "enhanced_prompt": enhanced_prompt,
                            "style": style,
                            "generation_method": "imagen-4.0",
                            "model_name": "imagen-4.0-generate-preview-06-06",
                            "real_ai_generation": True
                        }
                        
                        logger.info("βœ… Successfully generated real AI image with Google SDK!")
                        return json.dumps(response_data)
                        
            except Exception as e:
                logger.error(f"Google SDK generation failed: {e}")
                logger.error(f"Error type: {type(e).__name__}")
                if hasattr(e, 'response'):
                    logger.error(f"Response status: {getattr(e.response, 'status_code', 'unknown')}")
                    logger.error(f"Response text: {getattr(e.response, 'text', 'unknown')}")
        
        # Fallback: generate a deterministic placeholder URL
        logger.info("🔄 Using placeholder URL fallback")
        # Use a stable hash so the same prompt maps to the same placeholder image
        # across restarts (Python's built-in hash() is salted per process).
        import hashlib
        prompt_hash = int(hashlib.md5(enhanced_prompt.encode("utf-8")).hexdigest(), 16) % 10000
        image_url = f"https://picsum.photos/seed/{prompt_hash}/1024/1024"
        
        response_data = {
            "success": True,
            "image_url": image_url,
            "prompt": prompt,
            "enhanced_prompt": enhanced_prompt,
            "style": style,
            "generation_method": "placeholder",
            "real_ai_generation": False
        }
        
        return json.dumps(response_data)
        
    except Exception as e:
        logger.error(f"Image generation failed: {e}")
        return json.dumps({
            "success": False,
            "error": f"Generation failed: {str(e)}",
            "prompt": prompt,
            "style": style
        })

def analyse_marketing_image_with_gemini(image_url: str, prompt: str, review_guidelines: str = "") -> str:
    """
    Analyse a generated marketing image using Gemini Vision for quality, relevance, and compliance.
    
    Args:
        image_url (str): URL or base64 data of the generated image
        prompt (str): The original marketing prompt used to generate the image
        review_guidelines (str): Specific guidelines to check against
        
    Returns:
        str: JSON string containing detailed analysis and recommendations
    """
    logger.info(f"πŸ” Analyzing marketing image with Gemini Vision: {prompt[:50]}...")
    
    if not GEMINI_AVAILABLE or not GOOGLE_API_KEY:
        logger.warning("Gemini Vision not available, using fallback analysis")
        return _fallback_image_analysis(prompt, review_guidelines)
    
    try:
        # Create a detailed prompt for marketing image analysis
        analysis_prompt = f"""
        You are a Marketing Image Reviewer analyzing this image generated from: "{prompt}"
        
        CUSTOM REVIEW GUIDELINES (HIGHEST PRIORITY):
        {review_guidelines if review_guidelines.strip() else 'No specific guidelines provided - use standard marketing criteria'}
        
        CRITICAL MARKETING CHECKS:
        1. **Language/Text Requirements**: If guidelines mention "English" or specific language requirements, verify ALL visible text matches
        2. **Brand Compliance**: Check professional appearance, colour consistency, readability
        3. **Marketing Effectiveness**: Assess visual appeal and message clarity
        4. **Target Audience**: Evaluate cultural appropriateness and accessibility
        
        Evaluate on these marketing criteria:
        1. **Marketing Quality**: Visual appeal, composition, professional appearance (0.0 to 1.0)
        2. **Brand/Prompt Compliance**: How well it matches requirements and guidelines (0.0 to 1.0)
        3. **Marketing Effectiveness**: Message clarity, target audience appeal (0.0 to 1.0)
        
        RESPONSE FORMAT:
        Marketing Quality Score: [0.0-1.0]
        Brand Compliance Score: [0.0-1.0]
        Marketing Effectiveness Score: [0.0-1.0]
        
        Guideline Violations: [List specific violations of user guidelines, especially language/text requirements]
        Missing Elements: [List prompt elements missing from image]
        Present Elements: [List prompt elements correctly represented]
        
        Marketing Issues: [Brand compliance, readability, professional appearance problems]
        Language/Text Issues: [Specific text/signage language violations if any]
        Effectiveness Issues: [Marketing message clarity and appeal problems]
        
        Marketing Recommendations: [Specific marketing-focused improvement suggestions]
        
        CRITICAL: If guidelines specify English text/signage, explicitly check and report on ALL visible text language compliance.
        """
        
        # Load the image
        image = None
        if image_url.startswith('data:image'):
            # Handle base64 data URLs
            base64_data = image_url.split(',')[1]
            image_bytes = base64.b64decode(base64_data)
            image = Image.open(io.BytesIO(image_bytes))
        elif image_url.startswith('http'):
            # Handle regular URLs
            import requests
            response = requests.get(image_url, timeout=10)
            if response.status_code == 200:
                image = Image.open(io.BytesIO(response.content))
            else:
                logger.error(f"Failed to fetch image from URL: {response.status_code}")
                return _fallback_image_analysis(prompt, review_guidelines)
        else:
            logger.error("Invalid image URL format")
            return _fallback_image_analysis(prompt, review_guidelines)
        
        if not image:
            logger.error("Could not load image for analysis")
            return _fallback_image_analysis(prompt, review_guidelines)
        
        # Generate analysis using Gemini 2.5 Pro with enhanced multimodal understanding
        model = genai.GenerativeModel('gemini-2.5-pro')
        response = model.generate_content([analysis_prompt, image])
        analysis_text = response.text
        
        # Parse the analysis response
        parsed_result = _parse_gemini_analysis(analysis_text, prompt)
        
        logger.info(f"βœ… Gemini Vision analysis completed with score: {parsed_result.get('overall_score', 0)}")
        return json.dumps(parsed_result)
        
    except Exception as e:
        logger.error(f"Error in Gemini Vision analysis: {str(e)}")
        return _fallback_image_analysis(prompt, review_guidelines)

def _parse_gemini_analysis(analysis_text: str, original_prompt: str) -> Dict[str, Any]:
    """Parse Gemini Vision analysis response"""
    try:
        # Extract scores using regex patterns
        import re
        
        def extract_score(text: str, score_type: str) -> float:
            pattern = rf"{score_type}.*?Score:\s*([0-9]*\.?[0-9]+)"
            match = re.search(pattern, text, re.IGNORECASE)
            if match:
                return float(match.group(1))
            return 0.7  # Default score
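        # Example of the response line this regex targets (format requested in the
        # analysis prompt above): "Marketing Quality Score: 0.85" ->
        # extract_score(text, "Marketing Quality") returns 0.85; 0.7 is the neutral default.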
        
        def extract_list_items(text: str, section: str) -> List[str]:
            pattern = rf"{section}:\s*\[(.*?)\]"
            match = re.search(pattern, text, re.IGNORECASE | re.DOTALL)
            if match:
                items_text = match.group(1).strip()
                if items_text:
                    return [item.strip() for item in items_text.split(',') if item.strip()]
            return []
        
        # Extract scores
        marketing_quality = extract_score(analysis_text, "Marketing Quality")
        brand_compliance = extract_score(analysis_text, "Brand Compliance")
        marketing_effectiveness = extract_score(analysis_text, "Marketing Effectiveness")
        
        # Calculate overall score
        overall_score = (marketing_quality * 0.4 + brand_compliance * 0.4 + marketing_effectiveness * 0.2)
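        # Worked example: quality 0.8, compliance 0.9, effectiveness 0.7
        #   -> 0.8*0.4 + 0.9*0.4 + 0.7*0.2 = 0.82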
        
        # Extract lists
        violations = extract_list_items(analysis_text, "Guideline Violations")
        missing_elements = extract_list_items(analysis_text, "Missing Elements")
        present_elements = extract_list_items(analysis_text, "Present Elements")
        marketing_issues = extract_list_items(analysis_text, "Marketing Issues")
        language_issues = extract_list_items(analysis_text, "Language/Text Issues")
        effectiveness_issues = extract_list_items(analysis_text, "Effectiveness Issues")
        recommendations = extract_list_items(analysis_text, "Marketing Recommendations")
        
        # Generate recommendations if none found
        if not recommendations:
            if overall_score > 0.8:
                recommendations = ["Excellent marketing image! Meets all quality standards"]
            elif overall_score > 0.6:
                recommendations = ["Good marketing image with minor improvements needed"]
            else:
                recommendations = ["Image needs significant improvements for marketing use"]
        
        return {
            "success": True,
            "overall_score": round(overall_score, 2),
            "marketing_quality": round(marketing_quality, 2),
            "brand_compliance": round(brand_compliance, 2),
            "marketing_effectiveness": round(marketing_effectiveness, 2),
            "violations": violations,
            "missing_elements": missing_elements,
            "present_elements": present_elements,
            "marketing_issues": marketing_issues,
            "language_issues": language_issues,
            "effectiveness_issues": effectiveness_issues,
            "recommendations": recommendations[:5],  # Limit to top 5
            "analysis_method": "gemini-2.5-pro-vision",
            "model_name": "gemini-2.5-pro",
            "original_prompt": original_prompt
        }
        
    except Exception as e:
        logger.error(f"Error parsing Gemini analysis: {str(e)}")
        return _fallback_image_analysis(original_prompt, "")

def _fallback_image_analysis(prompt: str, review_guidelines: str) -> str:
    """Fallback analysis when Gemini Vision is not available"""
    logger.info("Using fallback text-based analysis")
    
    # Basic analysis based on prompt and guidelines
    word_count = len(prompt.split())
    
    # Simple scoring based on prompt quality
    if word_count < 10:
        quality_score = 0.5
    elif word_count < 20:
        quality_score = 0.7
    else:
        quality_score = 0.8
    
    # Check for marketing keywords
    marketing_keywords = ["professional", "corporate", "business", "marketing", "brand"]
    marketing_score = sum(1 for word in marketing_keywords if word.lower() in prompt.lower()) / len(marketing_keywords)
    
    # Check for language requirements
    language_issues = []
    if "english" in review_guidelines.lower() and "english" not in prompt.lower():
        language_issues.append("English language requirement not specified in prompt")
    
    # Generate recommendations
    recommendations = []
    if marketing_score < 0.2:
        recommendations.append("Add marketing context (e.g., professional, business, corporate)")
    if language_issues:
        recommendations.extend(language_issues)
    if word_count < 10:
        recommendations.append("Expand prompt with more descriptive details")
    
    if not recommendations:
        recommendations = ["Image should meet basic marketing requirements"]
    
    overall_score = (quality_score * 0.6 + marketing_score * 0.4)
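    # Worked example: quality 0.7 (medium-length prompt) and 2/5 marketing keywords (0.4)
    #   -> 0.7*0.6 + 0.4*0.4 = 0.58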
    
    return json.dumps({
        "success": True,
        "overall_score": round(overall_score, 2),
        "marketing_quality": round(quality_score, 2),
        "brand_compliance": round(marketing_score, 2),
        "marketing_effectiveness": round(overall_score, 2),
        "violations": language_issues,
        "missing_elements": [],
        "present_elements": [],
        "marketing_issues": [],
        "language_issues": language_issues,
        "effectiveness_issues": [],
        "recommendations": recommendations,
        "analysis_method": "fallback_text",
        "original_prompt": prompt
    })

def generate_and_review_marketing_image(prompt: str, style: str = "realistic", review_guidelines: str = "", max_retries: int = 3, review_threshold: float = 0.8) -> str:
    """
    Complete workflow: Generate a marketing image and provide quality review with iterations.
    
    Args:
        prompt (str): Description of the marketing image to generate
        style (str): Art style for the image (realistic, artistic, cartoon, photographic, illustration)
        review_guidelines (str): Specific guidelines for marketing review
        max_retries (int): Maximum number of generation attempts
        review_threshold (float): Minimum quality score required for approval
        
    Returns:
        str: JSON string containing image, review, and recommendations
    """
    logger.info(f"🎭 Starting complete marketing workflow for: {prompt}")
    logger.info(f"πŸ”„ Max retries: {max_retries}, Review threshold: {review_threshold}")
    
    workflow_history = []
    best_result = None
    best_score = 0.0
    
    try:
        for iteration in range(1, max_retries + 1):
            logger.info(f"πŸ”„ Iteration {iteration} of {max_retries}")
            
            # Step 1: Generate the image
            generation_response = generate_marketing_image(prompt, style)
            generation_data = json.loads(generation_response)
            
            if not generation_data.get("success", False):
                logger.error(f"Generation failed on iteration {iteration}: {generation_data.get('error')}")
                workflow_history.append({
                    "iteration": iteration,
                    "generation_status": "failed",
                    "review_score": 0.0,
                    "error": generation_data.get('error', 'Unknown error')
                })
                continue
            
            # Step 2: Analyse the generated image with Gemini Vision
            image_url = generation_data.get("image_url", "")
            analysis_response = analyse_marketing_image_with_gemini(image_url, prompt, review_guidelines)
            analysis_data = json.loads(analysis_response)
            
            current_score = analysis_data.get("overall_score", 0.0)
            logger.info(f"πŸ“Š Iteration {iteration} score: {current_score:.2f} (threshold: {review_threshold})")
            
            # Record this iteration
            workflow_history.append({
                "iteration": iteration,
                "generation_status": "success",
                "review_score": current_score,
                "review_method": analysis_data.get("analysis_method", "unknown"),
                "recommendations": analysis_data.get("recommendations", [])[:3]  # Top 3 for history
            })
            
            # Create result for this iteration
            current_result = {
                "generation_data": generation_data,
                "analysis_data": analysis_data,
                "image_url": image_url,
                "score": current_score,
                "iteration": iteration
            }
            
            # Keep track of best result
            if current_score > best_score:
                best_result = current_result
                best_score = current_score
            
            # Check if threshold is met
            if current_score >= review_threshold:
                logger.info(f"βœ… Quality threshold met on iteration {iteration}! Score: {current_score:.2f}")
                best_result = current_result  # Use this result since it passes threshold
                break
            else:
                logger.info(f"⚠️ Score {current_score:.2f} below threshold {review_threshold}. {'Retrying...' if iteration < max_retries else 'Max attempts reached.'}")
                
                # Enhance prompt for next iteration based on feedback
                if iteration < max_retries:
                    missing_elements = analysis_data.get("missing_elements", [])
                    violations = analysis_data.get("violations", [])
                    if missing_elements:
                        prompt += f" Including: {', '.join(missing_elements[:2])}"
                    if violations and "english" in review_guidelines.lower():
                        prompt += " with English signage and text"
        
        # Use best result if we have one
        if not best_result:
            return json.dumps({
                "success": False,
                "error": "All generation attempts failed",
                "workflow_history": workflow_history
            })
        
        # Build final result
        final_passed = best_result["score"] >= review_threshold
        final_status = "passed" if final_passed else "needs_improvement"
        
        workflow_result = {
            "success": True,
            "image": {
                "url": best_result["image_url"],
                "data": best_result["image_url"],
                "prompt": prompt,
                "style": style
            },
            "review": {
                "quality_score": best_result["score"],
                "final_status": final_status,
                "iterations": len(workflow_history),
                "passed": final_passed,
                "recommendations": best_result["analysis_data"].get("recommendations", []),
                "analysis_details": best_result["analysis_data"],
                "workflow_history": workflow_history
            },
            "metadata": {
                "generation_method": best_result["generation_data"].get("generation_method", "unknown"),
                "real_ai_generation": best_result["generation_data"].get("real_ai_generation", False),
                "review_method": best_result["analysis_data"].get("analysis_method", "unknown"),
                "workflow_type": "gradio_mcp_server",
                "best_iteration": best_result["iteration"],
                "threshold_met": final_passed
            }
        }
        
        logger.info(f"βœ… Complete marketing workflow successful! Best score: {best_score:.2f} from iteration {best_result['iteration']}")
        return json.dumps(workflow_result)
        
    except Exception as e:
        logger.error(f"Complete workflow failed: {e}")
        return json.dumps({
            "success": False,
            "error": f"Workflow failed: {str(e)}",
            "prompt": prompt,
            "style": style
        })

# Gradio interface functions
def process_generated_image_and_results(api_response_str: str) -> Tuple[Image.Image, str]:
    """Process API response and return image and review text for Gradio display"""
    try:
        response_data = json.loads(api_response_str)
        
        if not response_data.get('success', False):
            error_msg = response_data.get('error', 'Unknown error')
            
            # Add specific documentation links for common errors
            doc_link = ""
            if any(keyword in error_msg.lower() for keyword in ['political', 'timeout', 'stall']):
                doc_link = "\n\nπŸ“– See updated safety configuration: https://huggingface.co/spaces/CognizantAI/marketing-image-generator/blob/main/README.md#content-policy--safety-configuration"
            elif any(keyword in error_msg.lower() for keyword in ['hsbc', 'bank', 'corporate']):
                doc_link = "\n\nπŸ’‘ Note: Financial brands now work better with reduced safety filtering. See: https://huggingface.co/spaces/CognizantAI/marketing-image-generator/blob/main/README.md#improved-content-support"
            elif 'api' in error_msg.lower() or 'key' in error_msg.lower():
                doc_link = "\n\nπŸ“– See API troubleshooting: https://huggingface.co/spaces/CognizantAI/marketing-image-generator/blob/main/README.md#common-issues"
            
            return None, f"❌ Generation failed: {error_msg}{doc_link}"
        
        # Extract image data
        image_info = response_data.get('image', {})
        image_data_b64 = image_info.get('data', image_info.get('url', ''))
        
        image = None
        if image_data_b64:
            try:
                if image_data_b64.startswith('data:image'):
                    # Handle base64 data URLs
                    base64_data = image_data_b64.split(',')[1]
                    image_bytes = base64.b64decode(base64_data)
                    image = Image.open(io.BytesIO(image_bytes))
                elif image_data_b64.startswith('http'):
                    # Handle regular URLs (like picsum.photos)
                    import requests
                    response = requests.get(image_data_b64, timeout=10)
                    if response.status_code == 200:
                        image = Image.open(io.BytesIO(response.content))
                    else:
                        logger.error(f"Failed to fetch image from URL: {response.status_code}")
            except Exception as e:
                logger.error(f"Error processing image: {str(e)}")
        
        # Extract review data
        review_data = response_data.get('review', {})
        analysis_details = review_data.get('analysis_details', {})
        
        if review_data:
            quality_score = review_data.get('quality_score', 0)
            passed = review_data.get('passed', False)
            final_status = review_data.get('final_status', 'unknown')
            recommendations = review_data.get('recommendations', [])
            
            status_emoji = "🟢" if passed else "🔴"
            
            # Extract metadata about generation and review methods
            metadata = response_data.get('metadata', {})
            generation_method = metadata.get('generation_method', 'unknown')
            review_method = metadata.get('review_method', 'unknown')
            
            generation_info = ""
            if generation_method == "imagen-4.0":
                model_name = metadata.get('model_name', 'imagen-4.0-generate-preview-06-06')
                generation_info = f"🎨 **Generated with**: {model_name} (Real AI)\n"
            elif generation_method == "google-genai-sdk":
                generation_info = "🎨 **Generated with**: Google Imagen 4.0 (Real AI)\n"
            elif generation_method == "placeholder":
                generation_info = "🎨 **Generated with**: Placeholder (Fallback)\n"
            
            review_method_info = ""
            if review_method.startswith("gemini"):  # analysis_method is reported as "gemini-2.5-pro-vision"
                review_method_info = "🔍 **Reviewed with**: Gemini 2.5 Pro Vision (AI Analysis)\n"
            elif review_method == "fallback_text":
                review_method_info = "🔍 **Reviewed with**: Text Analysis (Fallback)\n"
            
            # Get detailed scores from analysis
            marketing_quality = analysis_details.get('marketing_quality', quality_score)
            brand_compliance = analysis_details.get('brand_compliance', quality_score)
            marketing_effectiveness = analysis_details.get('marketing_effectiveness', quality_score)
            
            review_text = f"""**πŸ” Marketing Review Results**

{generation_info}{review_method_info}
**Quality Score:** {quality_score:.2f}/1.0
**Status:** {status_emoji} {final_status.upper()}
**Architecture:** Gradio MCP Server

**📊 Detailed Scores:**
• Marketing Quality: {marketing_quality:.2f}/1.0
• Brand Compliance: {brand_compliance:.2f}/1.0
• Marketing Effectiveness: {marketing_effectiveness:.2f}/1.0

**💡 Recommendations:**
"""
            
            if recommendations:
                for i, rec in enumerate(recommendations[:5], 1):
                    review_text += f"{i}. {rec}\n"
            else:
                review_text += "β€’ Image meets quality standards\n"
                
        else:
            review_text = "⚠️ Review data not available"
        
        return image, review_text
        
    except Exception as e:
        return None, f"❌ Error processing results: {str(e)}"

def gradio_generate_marketing_image(prompt: str, style: str, max_retries: int, review_threshold: float, review_guidelines: str) -> Tuple[Image.Image, str]:
    """Gradio interface wrapper for complete marketing image generation with iterations"""
    if not prompt.strip():
        return None, "⚠️ Please enter a prompt to generate an image."
    
    try:
        # Ensure parameters are correct types
        max_retries = int(max_retries) if max_retries is not None else 3
        review_threshold = float(review_threshold) if review_threshold is not None else 0.8
        review_guidelines = str(review_guidelines) if review_guidelines is not None else ""
        
        logger.info(f"🎯 Starting generation with max_retries={max_retries}, threshold={review_threshold}")
        
        # Use the complete workflow function with iteration parameters
        result_json = generate_and_review_marketing_image(
            prompt=prompt, 
            style=style, 
            review_guidelines=review_guidelines,
            max_retries=max_retries,
            review_threshold=review_threshold
        )
        return process_generated_image_and_results(result_json)
    except Exception as e:
        error_message = f"❌ Error: {str(e)}\n\nπŸ“– For troubleshooting help, see: https://huggingface.co/spaces/CognizantAI/marketing-image-generator/blob/main/README.md#content-policy--safety-configuration"
        logger.error(error_message)
        return None, error_message

# Define suggested prompts
SUGGESTED_PROMPTS = {
    "Modern office team collaboration": ("A modern office space with diverse professionals collaborating around a sleek conference table, natural lighting, professional attire, English signage visible", "realistic"),
    "Executive boardroom meeting": ("Professional executive boardroom with polished conference table, city skyline view, business documents, English presentations on screens", "realistic"),
    "Customer service excellence": ("Professional customer service representative with headset in modern call centre, English signage, clean corporate environment", "realistic"),
    "Product showcase display": ("Clean product showcase on white background with professional lighting, English product labels, minimalist marketing aesthetic", "realistic"),
    "Creative workspace design": ("Creative workspace with colourful design elements, inspirational English quotes on walls, modern furniture, artistic marketing materials", "artistic"),
    "Brand presentation setup": ("Professional brand presentation setup with English branded materials, corporate colours, marketing displays, conference room setting", "realistic")
}

# Create Gradio interface
with gr.Blocks(title="Marketing Image Generator MCP", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎨 Marketing Image Generator
    ### Professional AI image generation with built-in MCP server support
    
    **Gradio MCP Server** → **Google Imagen4** → **Marketing Review** → **Results**
    
    *Please allow around 1-3 minutes for the image to be generated - the workflow may call Imagen 4 several times, so it takes its time. If several people are using the Space at once, expect longer waits, since everything runs through a single instance. Keep your prompts focused, comprehensive and sensible. Guardrails are in place, but as with all AI outputs, responsibility for the results rests with you, the user.*
    """)
    
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### βš™οΈ Configuration")
            
            # Main inputs
            prompt = gr.Textbox(
                label="Describe your marketing image",
                placeholder="e.g., A modern office space with natural lighting, featuring diverse professionals collaborating around a sleek conference table",
                lines=4,
                info="Be specific about the scene, style, mood, and any marketing elements you want to include"
            )
            
            style = gr.Dropdown(
                choices=["realistic", "artistic", "cartoon", "photographic", "illustration"],
                value="realistic",
                label="Art Style",
                info="Choose the artistic style for your generated image"
            )
            
            review_guidelines = gr.Textbox(
                label="πŸ” Marketing Review Guidelines (Optional)",
                placeholder="e.g., All text must be in English only, focus on professional appearance, ensure brand colors are prominent",
                lines=3,
                info="Provide specific marketing guidelines for review"
            )
            
            # Advanced settings
            with gr.Accordion("πŸ”§ Advanced Settings", open=False):
                max_retries = gr.Slider(
                    minimum=1,
                    maximum=5,
                    value=3,
                    step=1,
                    label="Max Retries",
                    info="Maximum number of retry attempts if quality threshold not met"
                )
                
                review_threshold = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.8,
                    step=0.1,
                    label="Quality Threshold",
                    info="Minimum quality score required for auto-approval"
                )
            
            # Generate button
            generate_btn = gr.Button("πŸš€ Generate Marketing Image", variant="primary", size="lg")
            
            # Status
            gr.Markdown("πŸ” **Mode**: Gradio MCP Server")
            gr.Markdown(f"πŸ”‘ **API Status**: {'βœ… Configured' if GOOGLE_API_KEY else '❌ No API Key'}")
        
        with gr.Column(scale=2):
            # Results display
            gr.Markdown("### πŸ–ΌοΈ Generated Image & Review")
            
            image_output = gr.Image(
                label="Generated Marketing Image",
                type="pil",
                height=400,
                show_download_button=True
            )
            
            review_output = gr.Markdown(
                value="Click **Generate Marketing Image** to create your marketing image with automated review",
                label="Marketing Review Results"
            )
    
    # Suggested prompts section
    gr.Markdown("---")
    gr.Markdown("### πŸ’‘ Suggested Marketing Prompts")
    
    with gr.Row():
        with gr.Column():
            gr.Markdown("**🏒 Professional/Corporate**")
            for prompt_name in ["Modern office team collaboration", "Executive boardroom meeting", "Customer service excellence"]:
                suggested_prompt, suggested_style = SUGGESTED_PROMPTS[prompt_name]
                btn = gr.Button(prompt_name, size="sm")
                btn.click(
                    fn=lambda p=suggested_prompt, s=suggested_style: (p, s),
                    outputs=[prompt, style]
                )
        
        with gr.Column():
            gr.Markdown("**🎨 Creative/Marketing**")
            for prompt_name in ["Product showcase display", "Creative workspace design", "Brand presentation setup"]:
                suggested_prompt, suggested_style = SUGGESTED_PROMPTS[prompt_name]
                btn = gr.Button(prompt_name, size="sm")
                btn.click(
                    fn=lambda p=suggested_prompt, s=suggested_style: (p, s),
                    outputs=[prompt, style]
                )
    
    # Event handlers
    generate_btn.click(
        fn=gradio_generate_marketing_image,
        inputs=[prompt, style, max_retries, review_threshold, review_guidelines],
        outputs=[image_output, review_output],
        show_progress=True
    )
    
    # Footer
    gr.Markdown("""
    ---
    <div style='text-align: center; color: #666; font-size: 0.9rem;'>
        <p>🎨 Marketing Image Generator | Gradio MCP Server</p>
        <p>Image Generation + Marketing Review + MCP API</p>
        <p>📖 <a href="https://huggingface.co/spaces/CognizantAI/marketing-image-generator/blob/main/README.md" target="_blank">Full Documentation & Troubleshooting</a> | MCP Endpoint: <code>/gradio_api/mcp/sse</code></p>
    </div>
    """)

def test_imagen4_models():
    """Test if Imagen 4.0 models are accessible"""
    if not GEMINI_AVAILABLE or not GOOGLE_API_KEY:
        logger.warning("❌ Cannot test Imagen 4.0 - Google AI not configured")
        return []
    
    imagen4_models = [
        "imagen-4.0-generate-preview-06-06",
        "imagen-4.0-fast-generate-preview-06-06", 
        "imagen-4.0-ultra-generate-preview-06-06"
    ]
    
    logger.info("πŸ§ͺ Testing Imagen 4.0 model access...")
    working_models = []
    
    for model_name in imagen4_models:
        try:
            logger.info(f"Testing {model_name}...")
            
            client = genai_sdk.Client(api_key=GOOGLE_API_KEY)
            result = client.models.generate_images(
                model=model_name,
                prompt="A simple red circle",
                config={
                    "number_of_images": 1,
                    "output_mime_type": "image/png"
                }
            )
            
            if result and hasattr(result, 'generated_images') and len(result.generated_images) > 0:
                working_models.append(model_name)
                logger.info(f"βœ… {model_name}: ACCESSIBLE")
            else:
                logger.warning(f"⚠️ {model_name}: No image returned")
                
        except Exception as e:
            error_msg = str(e)
            if "404" in error_msg or "not found" in error_msg.lower():
                logger.warning(f"⚠️ {model_name}: Model not found/available")
            elif "403" in error_msg or "permission" in error_msg.lower():
                logger.warning(f"⚠️ {model_name}: Permission denied")
            else:
                logger.error(f"❌ {model_name}: {error_msg}")
    
    logger.info("=" * 50)
    logger.info(f"πŸŽ‰ ACCESSIBLE IMAGEN 4.0 MODELS: {len(working_models)}")
    for model in working_models:
        logger.info(f"   βœ… {model}")
    
    if not working_models:
        logger.info("   ⚠️ No Imagen 4.0 models accessible - continuing with Imagen 3.0")
    
    logger.info("=" * 50)
    return working_models

if __name__ == "__main__":
    logger.info("πŸš€ Starting Marketing Image Generator with MCP Server")
    logger.info(f"πŸ”‘ Google AI: {'βœ… Configured' if GOOGLE_API_KEY else '❌ No API Key'}")
    logger.info("πŸ”— MCP Server will be available at /gradio_api/mcp/sse")
    
    # Test Imagen 4.0 model access on startup
    if GOOGLE_API_KEY:
        accessible_imagen4_models = test_imagen4_models()
        if accessible_imagen4_models:
            logger.info(f"πŸ’‘ UPGRADE OPPORTUNITY: You can use {accessible_imagen4_models[0]} for better quality!")
    
    demo.launch(mcp_server=True)
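    # Hypothetical client-side sketch (not part of this app): once the Space is running,
    # the same endpoints can be reached programmatically, e.g. with gradio_client:
    #   from gradio_client import Client
    #   client = Client("CognizantAI/marketing-image-generator")  # assumed Space id, taken from the README links above
    #   client.view_api()  # lists the callable endpoints and their parameters
    # MCP-capable agents can instead connect to the SSE endpoint at /gradio_api/mcp/sse.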