Create app.py
app.py
ADDED
import os
import ast  # used below to safely parse the director's final answer if it arrives as a string
from dotenv import load_dotenv
from smolagents import CodeAgent, ToolCallingAgent, LiteLLMModel, MCPClient
from mcp import StdioServerParameters
import base64
from PIL import Image
import io

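# Assumed prerequisites (not pinned in this file): python-dotenv, smolagents with
# LiteLLM/MCP support, Pillow, and Node.js so the `npx mcp-remote` commands below can
# bridge the remote SSE MCP endpoints.
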
# --- 1. Environment and Model Setup ---
# Load environment variables from a .env file (for API keys).
load_dotenv()

# Initialize the language model that our agents will use.
# Ensure your GEMINI_API_KEY is set in your .env file.
model = LiteLLMModel(
    model_id="gemini/gemini-2.0-flash-exp",
    api_key=os.getenv("GEMINI_API_KEY")
)

# --- 2. MCP Server Configuration ---
# Define the connection parameters for your MCP servers.
# These commands will run in the background to connect to your deployed tools.
kgb_server_parameters = StdioServerParameters(
    command="npx",
    args=[
        "mcp-remote",
        "https://agents-mcp-hackathon-kgb-mcp.hf.space/gradio_api/mcp/sse",
        "--transport",
        "sse-only",
    ],
)

t2i_server_parameters = StdioServerParameters(
    command="npx",
    args=[
        "mcp-remote",
        "https://agents-mcp-hackathon-t2i.hf.space/gradio_api/mcp/sse",
        "--transport",
        "sse-only",
    ],
)

server_parameters = [kgb_server_parameters, t2i_server_parameters]

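# Note: both Spaces above go to sleep when idle; make sure they are awake (for example by
# visiting their pages) before connecting, otherwise the SSE endpoints will not respond.
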
# --- 3. Main Execution Block ---
# We use the MCPClient as a context manager to handle the lifecycle of the servers.
with MCPClient(server_parameters) as mcp:
    print("Connecting to MCP servers and fetching tools...")
    all_tools = mcp.get_tools()
    print(f"Found {len(all_tools)} tools.")

    # --- 4. Tool Integration ---
    # Find our specific tools from the list provided by the MCP servers.
    # We will look for them by name.
    knowledge_tool = next((tool for tool in all_tools if "knowledge_graph" in tool.name.lower()), None)
    image_tool = next((tool for tool in all_tools if "text_to_image" in tool.name.lower()), None)

    if not knowledge_tool:
        print("Warning: Knowledge graph tool not found.")
    if not image_tool:
        print("Warning: Text-to-image tool not found.")

    writer_tools = [knowledge_tool] if knowledge_tool else []
    illustrator_tools = [image_tool] if image_tool else []
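
    # The exact tool names served by the Gradio MCP endpoints are an assumption; if the
    # substring checks above find nothing, list what is actually exposed, e.g.:
    # for tool in all_tools:
    #     print(tool.name)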

    # --- 5. Agent Definitions ---
    # We define our agent team, now equipped with the tools from your MCP servers.

    # The Writer Agent
    writer_agent = ToolCallingAgent(
        tools=writer_tools,
        model=model,
        name="writer",
        description="A creative agent that writes short stories. It can use a knowledge graph tool to research topics for inspiration."
    )

    # The Illustrator Agent
    illustrator_agent = ToolCallingAgent(
        tools=illustrator_tools,
        model=model,
        name="illustrator",
        description="An artist agent that creates illustrations based on a descriptive prompt using a text-to-image tool."
    )

    # The Director Agent
    director_agent = CodeAgent(
        tools=[],
        model=model,
        managed_agents=[writer_agent, illustrator_agent],
        system_prompt="""
You are the Director of Agentic Storycrafter, a creative team. Your job is to manage the writer and illustrator agents to create a story with an illustration.

Here is your workflow:
1. Receive a user's prompt for a story.
2. Call the `writer` agent to write a story based on the user's prompt.
3. After the story is written, create a short, descriptive prompt for an illustration that captures the essence of the story.
4. Call the `illustrator` agent with this new prompt to generate an image. The result will be a dictionary containing image data.
5. Return a dictionary containing both the final 'story' and the 'image_data' from the illustrator.
"""
    )
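
    # NOTE: some smolagents releases do not accept a `system_prompt` keyword argument on
    # CodeAgent; if construction raises a TypeError, an alternative is to drop the argument
    # and prepend these workflow instructions to the task string passed to run() below.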

    # --- 6. The Creative Workflow ---
    if __name__ == "__main__":
        user_prompt = "a story about a wise old owl living in a library of forgotten books"

        print("\n--- Director's Task ---")
        print(f"Prompt: {user_prompt}\n")

        # The director now runs the full workflow.
        final_output = director_agent.run(f"Create a story and illustration for the following prompt: {user_prompt}")

        print("\n--- Agentic Storycrafter Result ---")

        # run() usually returns the final answer directly (often already a dict); if it comes
        # back as a string, parse it safely rather than eval()-ing model output.
        result_dict = final_output if isinstance(final_output, dict) else ast.literal_eval(str(final_output))

        story = result_dict.get("story")
        image_data = result_dict.get("image_data")

        print("\n--- STORY ---")
        print(story)
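
        # Assumption: the text-to-image MCP tool returns a dict with a base64 payload under
        # 'b64_json'. If the Gradio Space returns a file path or URL instead, adapt this branch.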
        if image_data and 'b64_json' in image_data:
            print("\n--- ILLUSTRATION ---")
            print("Illustration created. Saving to 'story_illustration.png'")
            # Decode the base64 string and save it as an image file.
            try:
                img_bytes = base64.b64decode(image_data['b64_json'])
                img = Image.open(io.BytesIO(img_bytes))
                img.save("story_illustration.png")
                print("Image saved successfully.")
            except Exception as e:
                print(f"Error saving image: {e}")
        else:
            print("\n--- ILLUSTRATION ---")
            print("No illustration was generated.")
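
# To try it out (assuming GEMINI_API_KEY is set in .env and both Spaces are awake):
#   python app.py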