import os
from groq import Groq
import gradio as gr

# Read the API key from the environment. In Colab, add it under "Secrets"
# and export it into os.environ (e.g. via google.colab.userdata) first.
groq_api_key = os.environ.get("GROQ_API_KEY")
if not groq_api_key:
    raise ValueError("GROQ_API_KEY not found in the environment. Add it to Colab Secrets and export it before running.")

client = Groq(api_key=groq_api_key)

# IMPORTANT: Groq's model lineup changes over time. Check the deprecations
# page (https://console.groq.com/docs/deprecations) for what is currently
# available, and update this constant if the default below is retired.
# As of July 2025, recommended models include "llama-3.1-8b-instant" and
# "llama-3.3-70b-versatile".
DEFAULT_GROQ_MODEL = "llama-3.1-8b-instant"
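
# A quick way to check which models your key can actually use. This sketch
# assumes the Groq SDK exposes an OpenAI-style models endpoint
# (client.models.list()); uncomment to verify before relying on the default.
# for model in client.models.list().data:
#     print(model.id)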

def get_groq_response(messages: list, model_name: str = DEFAULT_GROQ_MODEL, temperature: float = 0.7):
    """
    Generates a response from Groq given a list of messages.
    Args:
        messages (list): A list of message dictionaries (e.g., [{"role": "user", "content": "Hello"}]).
        model_name (str): The Groq model to use.
        temperature (float): Controls the randomness of the output.
    Returns:
        str: The generated response content.
    """
    try:
        chat_completion = client.chat.completions.create(
            messages=messages,
            model=model_name,
            temperature=temperature,
            max_tokens=1024, # Adjust as needed
            stream=False,
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        print(f"Error calling Groq API: {e}")
        return "An error occurred while generating a response. Please check your API key and Groq console."


# --- Simple Conversational Agent Class ---
SYSTEM_PROMPT = (
    "You are a helpful and concise AI assistant powered by Groq. "
    "You respond quickly."
)

class SimpleGroqAgent:
    def __init__(self, model_name: str = DEFAULT_GROQ_MODEL, temperature: float = 0.7):
        self.model_name = model_name
        self.temperature = temperature
        # Seed the history with a system message defining the agent's persona
        self.conversation_history = [{"role": "system", "content": SYSTEM_PROMPT}]

    def chat(self, user_input: str) -> str:
        """
        Processes user input and returns an AI response, updating conversation history.
        """
        # Add user's message to history
        self.conversation_history.append({"role": "user", "content": user_input})
        
        # Get response from Groq using the current history
        response_content = get_groq_response(self.conversation_history, self.model_name, self.temperature)
        
        # Add assistant's response to history
        self.conversation_history.append({"role": "assistant", "content": response_content})
        
        return response_content

    def reset_conversation(self):
        """Resets the conversation history to just the system message."""
        self.conversation_history = [{"role": "system", "content": SYSTEM_PROMPT}]
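
# Standalone usage sketch (no Gradio needed; uncomment to sanity-check):
# agent = SimpleGroqAgent()
# print(agent.chat("What is Groq?"))
# print(agent.chat("Summarize that in one sentence."))  # history carries over
# agent.reset_conversation()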

# --- Gradio Interface Setup ---

# Instantiate a single agent instance shared by the interface
groq_agent = SimpleGroqAgent()

def chat_interface_function(message, history):
    """
    Function to be used by Gradio's ChatInterface.
    'history' is a list of [user_message, bot_message] pairs.
    Gradio's Chatbot component manages the visual history,
    but our SimpleGroqAgent manages the history sent to the LLM.
    """
    # The SimpleGroqAgent already manages its own internal history.
    # We just need to pass the current message to it.
    response = groq_agent.chat(message)
    return response

# Create the Gradio ChatInterface
# In Colab, share=True will generate a public URL.
demo = gr.ChatInterface(
    fn=chat_interface_function,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask me anything, powered by Groq!", container=False, scale=7),
    title="⚡️ Groq-Powered AI Assistant in Colab ⚡️",
    description=f"Ask a question and get a lightning-fast response from Groq's {DEFAULT_GROQ_MODEL} LLM!",
    theme="soft",
    examples=[
        "Tell me a short, funny story.",
        "Explain the concept of neural networks in simple terms.",
        "What are the main components of a computer?",
        "Write a creative short paragraph about a futuristic city."
    ],
)

# Launch the Gradio app
# This will provide a public URL you can click to access the UI.
# It will also run the UI directly in the Colab output.
print(f"Launching Gradio demo with model: {DEFAULT_GROQ_MODEL}")
demo.launch(share=True)
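
# Optional: ChatInterface's built-in "Clear" button never calls
# groq_agent.reset_conversation(). A minimal gr.Blocks sketch that wires a
# reset button to both the UI and the agent (hypothetical names; assumes the
# classic [user, bot] tuple history format used above):
#
# with gr.Blocks() as demo_with_reset:
#     chatbot = gr.Chatbot(height=300)
#     msg = gr.Textbox(placeholder="Ask me anything, powered by Groq!")
#     reset_btn = gr.Button("Reset conversation")
#
#     def respond(message, chat_history):
#         bot_reply = groq_agent.chat(message)
#         chat_history.append((message, bot_reply))
#         return "", chat_history
#
#     def do_reset():
#         groq_agent.reset_conversation()
#         return []
#
#     msg.submit(respond, [msg, chatbot], [msg, chatbot])
#     reset_btn.click(do_reset, None, chatbot)
#
# demo_with_reset.launch(share=True)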