import os
import gradio as gr
from groq import Groq
from dotenv import load_dotenv
from datetime import datetime

# Load environment variables
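# A .env file next to this script (read by load_dotenv) is expected to provide
# GROQ_API_KEY; the key can also be exported directly in the shell environment.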
load_dotenv()

# Initialize the Groq client with the API key from the environment
client = Groq(api_key=os.getenv("GROQ_API_KEY"))

def load_system_prompt():
    """Load and decode the system prompt from the config file."""
    try:
        current_dir = os.path.dirname(os.path.abspath(__file__))
        config_path = os.path.join(current_dir, "config", "system_prompt.txt")
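        # The prompt is expected at config/system_prompt.txt next to this script;
        # if it cannot be read, the generic fallback prompt below is used instead.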
        
        with open(config_path, "r", encoding="utf-8") as file:
            return file.read().strip()
    except Exception as e:
        print(f"Error loading system prompt: {str(e)}")
        return "You are a helpful social media research assistant."

# Load system prompt once
SYSTEM_PROMPT = load_system_prompt()

def generate_ai_updates():
    """Generate AI updates using Groq API."""
    try:
        current_date = datetime.now().strftime("%B %d, %Y")
        
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": SYSTEM_PROMPT
                },
                {
                    "role": "user",
                    "content": f"Please analyze and provide the latest AI developments and trends for {current_date}. Follow the verification workflow and create content as specified."
                }
            ],
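            # Generation settings: a Groq-hosted Llama 3 70B model, moderate
            # temperature, and light penalties to discourage repetition.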
            model="llama3-70b-8192",
            temperature=0.7,
            max_tokens=2048,
            top_p=0.9,
            frequency_penalty=0.1,
            presence_penalty=0.1
        )
        return chat_completion.choices[0].message.content
    
    except Exception as e:
        import traceback
        print("=== Groq API Error ===")
        print(traceback.format_exc())
        print("======================")
        return "⚠️ Oops! Could not generate AI updates. Please try again later."

# Gradio UI: a single button that triggers generation and a read-only textbox for the result
with gr.Blocks(title="AI Content Curator") as demo:
    gr.Markdown("# πŸ“ˆ AI Content Curator")
    gr.Markdown("Click below to generate the latest AI developments and recommendations.")

    with gr.Row():
        with gr.Column():
            submit_btn = gr.Button("🚀 Generate Latest AI Updates")
        
        with gr.Column():
            output = gr.Textbox(
                label="πŸ”Ž AI Updates and Content Recommendations",
                lines=20,
                interactive=False,
                placeholder="Waiting for AI updates..."
            )
    
    submit_btn.click(
        fn=generate_ai_updates,
        inputs=[],
        outputs=output
    )
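
# share=True asks Gradio for a temporary public link in addition to the local
# server started below.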

if __name__ == "__main__":
    demo.launch(share=True)