""" 🚀 The Content Creator's Journey (AI-Powered Edition) ================================================== This version replaces all mocked functions with real generative AI models for image analysis, content generation, and translation. - Stage 1: Inspiration Hub (Summarization AI) - Stage 2: Creative Studio (Image-to-Text and Text Generation AI) - Stage 3: Globalization Suite (Translation AI) Author: Gemini Date: July 4, 2025 """ import os import re import gradio as gr import arxiv import nltk from transformers import pipeline from youtube_transcript_api import YouTubeTranscriptApi, NoTranscriptFound from youtube_transcript_api.formatters import TextFormatter from PIL import Image # --- Model & Pipeline Setup (Lazy Loading) --- # Dictionary to hold our models, loaded only when needed. models = {} def get_pipeline(task, model_name): """Initializes and returns a pipeline, caching it for reuse.""" if model_name not in models: print(f"🔄 Initializing {task} pipeline with model {model_name}...") models[model_name] = pipeline(task, model=model_name) print(f"✅ {model_name} loaded.") return models[model_name] # --- Stage 1: The Spark (Inspiration Hub) --- # This section already uses a real summarization model, so no changes are needed here. # (Functions search_arxiv_papers and summarize_youtube_from_url are omitted for brevity but remain the same) # ... (Previous code for Stage 1 remains here) ... # --- Stage 2: The Craft (Creative Studio) --- def analyze_image_with_ai(image: Image.Image) -> (str, dict): """Uses a real AI model to generate a description of the image.""" captioner = get_pipeline("image-to-text", "Salesforce/blip-image-captioning-large") description = captioner(image)[0]['generated_text'] analysis = {"description": description} report = ( f"**🎨 AI Vision Analysis:**\n\n" f"- **Image Content:** {description}" ) return report, analysis def generate_creative_content_with_ai(style: str, audience: str, image_analysis: dict, custom_prompt: str) -> (str, str): """Uses a real LLM to generate content based on a detailed prompt.""" generator = get_pipeline("text-generation", "gpt2") image_desc = image_analysis.get("description", "a visual scene") # Create a detailed prompt for the LLM prompt = ( f"Create a '{style}' for a '{audience}' audience. " f"The content should be inspired by the following scene: '{image_desc}'. 
" f"Follow this specific instruction: '{custom_prompt if custom_prompt else 'Be creative and engaging'}'.\n\n" f"Here is the content:" ) # Generate text and clean it up generated_outputs = generator(prompt, max_length=150, num_return_sequences=1, pad_token_id=generator.tokenizer.eos_token_id) generated_text = generated_outputs[0]['generated_text'] # Clean the output by removing the initial prompt clean_text = generated_text.replace(prompt, "").strip() # The analytics are now informational rather than predictive analytics_report = ( f"**📊 Generation Details:**\n\n" f"- **Model Used:** gpt2\n" f"- **Core Prompt:** Based on a photo of '{image_desc[:40]}...'" ) return clean_text, analytics_report def run_creative_studio(uploaded_image, style, audience, custom_prompt): """Interface function to run the full AI-powered 'Craft' stage.""" if uploaded_image is None: return "❌ Please upload an image.", "", "" try: image = uploaded_image analysis_report, image_analysis = analyze_image_with_ai(image) generated_text, analytics = generate_creative_content_with_ai(style, audience, image_analysis, custom_prompt) return analysis_report, generated_text, analytics except Exception as e: return f"⚠️ Error: {e}", "", "" # --- Stage 3: The Reach (Globalization Suite) --- def translate_content_with_ai(text: str, languages: list) -> str: """Translates content using real AI models.""" if not text: return "❌ Please provide text to translate." if not languages: return "❌ Please select at least one language." lang_model_map = { "German 🇩🇪": "Helsinki-NLP/opus-mt-en-de", "Spanish 🇪🇸": "Helsinki-NLP/opus-mt-en-es", "Japanese 🇯🇵": "Helsinki-NLP/opus-mt-en-jap", } translations = [f"### 🌍 Translated Content\n"] for lang_name in languages: model_name = lang_model_map.get(lang_name) if model_name: translator = get_pipeline("translation", model_name) translated_text = translator(text)[0]['translation_text'] translations.append(f"**{lang_name.upper()} VERSION:**\n\n{translated_text}") return "\n\n---\n\n".join(translations) # --- Full Gradio UI --- # The UI structure remains the same, but the functions it calls are now AI-powered. # The code for create_ui(), search_arxiv_papers, and summarize_youtube_from_url is omitted here # for brevity, as it doesn't change from the previous version. You can just plug the # new functions above into your existing app.py file. # --- Helper functions from previous version to make the file runnable --- def search_arxiv_papers(topic: str) -> str: if not topic: return "❌ Please enter a topic to search." summarizer = get_pipeline("summarization", "sshleifer/distilbart-cnn-12-6") search = arxiv.Search(query=topic, max_results=3, sort_by=arxiv.SortCriterion.Relevance) results = [f"**📄 {res.title}**\n\n**Summary:** {summarizer(res.summary.replace(' ', ' '), max_length=80, min_length=20, do_sample=False)[0]['summary_text']}\n\n**🔗 [Read Paper]({res.pdf_url})**" for res in search.results()] return "\n\n---\n\n".join(results) if results else "No papers found." def summarize_youtube_from_url(video_url: str) -> str: if not video_url: return "❌ Please enter a YouTube URL." video_id_match = re.search(r"(?:v=|\/)([0-9A-Za-z_-]{11}).*", video_url) if not video_id_match: return "❌ Invalid YouTube URL." video_id = video_id_match.group(1) try: transcript_list = YouTubeTranscriptApi.get_transcript(video_id) transcript_text = " ".join([d['text'] for d in transcript_list]) if len(transcript_text) < 200: return "Transcript too short." 
        summarizer = get_pipeline("summarization", "sshleifer/distilbart-cnn-12-6")
        summary = summarizer(transcript_text, max_length=100, min_length=30, do_sample=False)
        return f"**✅ Summary:**\n\n{summary[0]['summary_text']}"
    except NoTranscriptFound:
        return "❌ No transcript available."
    except Exception as e:
        return f"⚠️ Error: {e}"


def create_ui():
    css = """
    .gradio-container { font-family: 'Inter', sans-serif; background: #f5f7fa; }
    .tab-item { background: white; border-radius: 12px; padding: 25px; border: 1px solid #e0e0e0; }
    footer { display: none !important }
    """
    with gr.Blocks(theme=gr.themes.Base(), css=css, title="The Content Creator's Journey") as app:
        gr.Markdown("""
From a spark of an idea to a global message, in three stages.