# AgenticI / app.py
import ast
import base64
import io
import os

from dotenv import load_dotenv
from mcp import StdioServerParameters
from PIL import Image
from smolagents import CodeAgent, ToolCallingAgent, LiteLLMModel, MCPClient
# --- 1. Environment and Model Setup ---
# Load environment variables from a .env file (for API keys)
load_dotenv()
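# Example .env file (placeholder value, not a real key):
#   GEMINI_API_KEY=your-gemini-api-key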
# Initialize the language model that our agents will use.
# Ensure your GEMINI_API_KEY is set in your .env file.
model = LiteLLMModel(
model_id="gemini/gemini-2.0-flash-exp",
api_key=os.getenv("GEMINI_API_KEY")
)
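# LiteLLM routes "gemini/..." model ids to the Google Gemini API, so the only
# credential this script needs is GEMINI_API_KEY.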
# --- 2. MCP Server Configuration ---
# Define the connection parameters for your MCP servers.
kgb_server_parameters = StdioServerParameters(
command="npx",
args=[
"mcp-remote",
"https://agents-mcp-hackathon-kgb-mcp.hf.space/gradio_api/mcp/sse",
"--transport",
"sse-only"],
)
t2i_server_parameters = StdioServerParameters(
command="npx",
args=[
"mcp-remote",
"https://agents-mcp-hackathon-t2i.hf.space/gradio_api/mcp/sse",
"--transport",
"sse-only"],
)
server_parameters = [kgb_server_parameters, t2i_server_parameters]
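# Both entries point at remote Gradio MCP servers; `mcp-remote` bridges their
# SSE endpoints to the stdio transport that StdioServerParameters describes.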
# --- 3. Main Application Logic ---
def run_storycrafter():
    # Instantiate the MCPClient and use it as a context manager so the
    # connections to all MCP servers are opened here and closed cleanly on exit.
    mcp = MCPClient(server_parameters)
    with mcp:
print("Connecting to MCP servers and fetching tools...")
# Get all available tools from all connected MCP servers.
all_tools = mcp.get_tools()
print(f"Found {len(all_tools)} tools.")
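        # Optional debugging aid: list the discovered tools by name
        # (every smolagents tool carries a `name` attribute).
        for tool in all_tools:
            print(f" - {tool.name}")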
if not all_tools:
print("Warning: No tools were loaded from the MCP servers. Agents will have limited capabilities.")
# --- 4. Agent Definitions ---
# The Writer Agent is given all available tools.
writer_agent = ToolCallingAgent(
tools=all_tools,
model=model,
name="writer",
description="A creative agent that writes short stories. It can use a knowledge graph tool to research topics for inspiration."
)
# The Illustrator Agent is also given all available tools.
illustrator_agent = ToolCallingAgent(
tools=all_tools,
model=model,
name="illustrator",
description="An artist agent that creates illustrations based on a descriptive prompt using a text-to-image tool."
)
# The Director Agent orchestrates the other two agents.
director_agent = CodeAgent(
tools=[],
model=model,
managed_agents=[writer_agent, illustrator_agent],
system_prompt="""
You are the Director of Agentic Storycrafter, a creative team. Your job is to manage the writer and illustrator agents to create a story with an illustration.
Here is your workflow:
1. Receive a user's prompt for a story.
2. Call the `writer` agent to write a story based on the user's prompt.
3. After the story is written, create a short, descriptive prompt for an illustration that captures the essence of the story.
4. Call the `illustrator` agent with this new prompt to generate an image. The result will be a dictionary containing image data.
5. Return a dictionary containing both the final 'story' and the 'image_data' from the illustrator.
"""
)
# --- 5. The Creative Workflow ---
user_prompt = "a story about a wise old owl living in a library of forgotten books"
        print("\n--- Director's Task ---")
print(f"Prompt: {user_prompt}\n")
final_output = director_agent.run(f"Create a story and illustration for the following prompt: {user_prompt}")
print("\n--- Agentic Storycrafter Result ---")
        # The director may return a dict directly or only its string
        # representation; ast.literal_eval parses the latter more safely than eval().
        if isinstance(final_output, dict):
            result_dict = final_output
        else:
            result_dict = ast.literal_eval(str(final_output))
        story = result_dict.get("story")
        image_data = result_dict.get("image_data")
        print("\n--- STORY ---")
        print(story)
        if isinstance(image_data, dict) and 'b64_json' in image_data:
print("\n--- ILLUSTRATION ---")
print("Illustration created. Saving to 'story_illustration.png'")
try:
img_bytes = base64.b64decode(image_data['b64_json'])
img = Image.open(io.BytesIO(img_bytes))
img.save("story_illustration.png")
print("Image saved successfully.")
except Exception as e:
print(f"Error saving image: {e}")
else:
print("\n--- ILLUSTRATION ---")
print("No illustration was generated.")
# --- 6. Execution Start ---
if __name__ == "__main__":
run_storycrafter()