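"""Auto Ad Generator (Gradio app).

Extracts SEO keywords from a car-listing URL (or takes keywords directly),
then uses the OpenAI API to write per-platform ad copy and generate an ad
image. Note: this code targets the legacy openai<1.0 SDK
(openai.ChatCompletion / openai.Image) and will not run unchanged against
openai>=1.0.
"""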
import os
import openai
import gradio as gr
from bs4 import BeautifulSoup
import requests

openai.api_key = os.getenv("OPENAI_API_KEY")

def extract_text_from_url(url):
    """Extracts text from HTML for static pages. Warns if content is very thin."""
    try:
        resp = requests.get(url, timeout=30, headers={
            "User-Agent": "Mozilla/5.0 (compatible; Bot/1.0)"
        })
        resp.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
        soup = BeautifulSoup(resp.content, "html.parser")
        # Try to get rich descriptive content
        candidates = soup.find_all(['h1','h2','h3','h4','p','span','li'])
        text = ' '.join([c.get_text(strip=True) for c in candidates])
        text = text[:4000]
        if len(text) < 100:
            raise ValueError("Could not extract enough content (site may require JavaScript). Please enter keywords manually.")
        return text
    except Exception as e:
        raise ValueError(f"URL extraction error: {e}")

def extract_keywords(text):
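    """Ask the model for up to 10 concise SEO keywords and return them as a list."""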
    prompt = f"""
    Extract up to 10 concise, relevant SEO keywords suitable for an automotive advertisement from the following content:
    {text}
    Keywords:
    """
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.6,
        max_tokens=100
    )
    # Handle either comma- or newline-separated model output
    output = response.choices[0].message.content.strip()
    if ',' in output:
        keywords = output.split(',')
    else:
        keywords = output.split('\n')
    return [kw.strip() for kw in keywords if kw.strip()]

def generate_ad_copy(platform, keywords):
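    """Generate SEO-optimized ad copy with a call-to-action for a single platform."""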
    prompt = f"""
    Create a compelling, SEO-optimized {platform} ad using these keywords: {', '.join(keywords)}. 
    Include a clear and enticing call-to-action.
    """
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.7,
        max_tokens=300
    )
    return response.choices[0].message.content.strip()

def generate_ad_image(keywords):
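    """Generate a 512x512 ad image from the keywords, save it to disk, and return the file path."""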
    kw_str = ", ".join(keywords)
    image_prompt = f"Professional automotive social media ad featuring: {kw_str}. Bright visuals, luxury theme, with text overlay space."
    response = openai.Image.create(
        prompt=image_prompt,
        n=1,
        size="512x512"
    )
    image_url = response["data"][0]["url"]
    img_data = requests.get(image_url, timeout=60).content  # download the generated image
    img_file = "generated_ad_image.png"
    with open(img_file, "wb") as f:
        f.write(img_data)
    return img_file

def main_workflow(input_mode, url_or_keywords):
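    """Run the full pipeline: resolve keywords, write ad copy per platform, generate an image,
    and export the ads to a TXT file. Returns (keywords, ad_copies, image_path, error)."""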
    error = None
    keywords = []
    ad_copies = {}
    image_path = None

    if input_mode == "URL":
        try:
            text = extract_text_from_url(url_or_keywords)
            keywords = extract_keywords(text)
        except Exception as e:
            return None, None, None, f"{e}"
    else:
        keywords = [kw.strip() for kw in url_or_keywords.split(",") if kw.strip()]
        if not keywords:
            return None, None, None, "Please provide at least one keyword."
    # Generate ad copies
    platforms = ["Facebook", "Instagram", "X (Twitter)", "Google Search"]
    for platform in platforms:
        ad_copies[platform] = generate_ad_copy(platform, keywords)
    # Generate image
    try:
        image_path = generate_ad_image(keywords)
    except Exception as e:
        error = f"Image generation error: {e}"

    # Save ads to txt
    output_txt = "generated_ads.txt"
    with open(output_txt, "w", encoding="utf-8") as f:
        for platform, content in ad_copies.items():
            f.write(f"--- {platform} Ad Copy ---\n{content}\n\n")
    return keywords, ad_copies, image_path, error

def run_space(input_mode, url, keywords):
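    """Gradio callback: pick the active input field, run the workflow, and format outputs for the UI."""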
    url_or_keywords = url if input_mode == "URL" else keywords
    keywords, ad_copies, image_path, error = main_workflow(input_mode, url_or_keywords)
    ad_previews = ""
    if ad_copies:
        for platform, ad in ad_copies.items():
            ad_previews += f"### {platform}\n{ad}\n\n"
    return (
        keywords,
        ad_previews,
        image_path,
        "generated_ads.txt" if ad_copies else None,
        error
    )

with gr.Blocks() as demo:
    gr.Markdown("# 🚗 Auto Ad Generator\nPaste a car listing URL **or** enter your own keywords, then preview AI-generated ads for each social media platform, plus an auto-generated image!")
    input_mode = gr.Radio(["URL", "Keywords"], value="URL", label="Input Type")
    url_input = gr.Textbox(label="Listing URL", placeholder="https://www.cars.com/listing/...", visible=True)
    kw_input = gr.Textbox(label="Manual Keywords (comma separated)", placeholder="e.g. BMW, used car, sunroof", visible=False)
    submit_btn = gr.Button("Generate Ads")

    gr.Markdown("## Keywords")
    kw_out = gr.JSON(label="Extracted/Provided Keywords")

    gr.Markdown("## Ad Copy Previews")
    ad_out = gr.Markdown(label="Ad Copy Preview")

    gr.Markdown("## Generated Ad Image")
    img_out = gr.Image(label="Generated Ad Image", type="filepath")

    gr.Markdown("## Download Ad Copies")
    file_out = gr.File(label="Download TXT")

    err_out = gr.Textbox(label="Errors", interactive=False)

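    # Toggle which input textbox is visible based on the selected input type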
    def show_hide_fields(choice):
        return (
            gr.update(visible=choice == "URL"),
            gr.update(visible=choice == "Keywords"),
        )

    input_mode.change(show_hide_fields, input_mode, [url_input, kw_input])

    submit_btn.click(
        run_space,
        inputs=[input_mode, url_input, kw_input],
        outputs=[kw_out, ad_out, img_out, file_out, err_out]
    )

demo.launch()