Spaces: Running on Zero
File size: 3,495 Bytes
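# Object-detection agent Space: a smolagents "master agent" is exposed through a
# multimodal Gradio chat UI; users attach images, ask for detections, and the
# images the agent stores in "final_images" are shown in a gallery.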
import PIL.Image  # "import PIL" alone does not reliably expose PIL.Image
import gradio as gr
from agents.all_agents import get_master_agent
from llm import get_default_model
from smolagents.gradio_ui import stream_to_gradio
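
# Serve the local "images/" directory as static files so the example image used
# below in the MultimodalTextbox can be displayed by the browser.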
gr.set_static_paths(paths=["images/"])
master_agent = get_master_agent(get_default_model())
print(master_agent)
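
# Chat handler: builds the agent prompt from the user's message, loads and resizes
# any attached images, streams the agent's intermediate steps into the chat history,
# and finally yields whatever images the agent stored in "final_images".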
def chat_interface_fn(input_request, history):
    message = input_request["text"]
    image_paths = input_request["files"]
    print(message)
    print(image_paths)
    print(history)
    prompt = f"""
    You are given a message and possibly some images.
    The images are already loaded in the variable "images".
    The message is:
    {message}

    You can use the following tools to perform tasks on the images:
    - object_detection_tool: detects objects in an image; you must provide the image to it.
    - object_detection_model_retriever: retrieves object detection models; you must provide the type of class a model should detect.

    If you don't know which model to use, retrieve one with the object_detection_model_retriever tool.
    Never invent a model name; always use a model name returned by the object_detection_model_retriever tool.

    Whenever you need to use a tool, first write the tool call as a code block,
    then wait for the tool to return its result, and use that result to perform the task, step by step.

    Before your final answer, store any images you want to show in the "final_images" variable.
    Always return a text description of what you did.
    """
    images = []
    resized_images = []
    if image_paths is not None and len(image_paths) > 0:
        for image_path in image_paths:
            image = PIL.Image.open(image_path)
            # Get original dimensions
            width, height = image.size
            # Downscale large images while maintaining the aspect ratio
            if width > 1200 or height > 800:
                ratio = min(1200 / width, 800 / height)
                new_width = int(width * ratio)
                new_height = int(height * ratio)
                resized_image = image.resize(
                    (new_width, new_height), PIL.Image.Resampling.LANCZOS
                )
            else:
                resized_image = image
            resized_images.append(resized_image)
            images.append(image)
    for message in stream_to_gradio(
        master_agent,
        task=prompt,
        task_images=resized_images,
        additional_args={"images": images},
        reset_agent_memory=False,
    ):
        history.append(message)
        yield history, None

    final_images = master_agent.python_executor.state.get("final_images", [])
    yield history, final_images
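
# UI: a multimodal ChatInterface (text + file uploads) pre-filled with an example
# request, plus a Gallery that displays the agent's output images.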
with gr.Blocks() as demo:
    output_gallery = gr.Gallery(label="Output Gallery", type="pil")
    gr.ChatInterface(
        chat_interface_fn,
        type="messages",
        multimodal=True,
        textbox=gr.MultimodalTextbox(
            value={
                "text": "Draw a bbox around each car in the image",
                "files": [
                    {
                        "url": "https://upload.wikimedia.org/wikipedia/commons/5/51/Crossing_the_Hudson_River_on_the_George_Washington_Bridge_from_Fort_Lee%2C_New_Jersey_to_Manhattan%2C_New_York_%287237796950%29.jpg",
                        "path": "images/image.jpg",
                        "name": "image.jpg",
                    }
                ],
            }
        ),
        additional_outputs=[output_gallery],
    )

demo.launch()
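
# To run locally (assuming this file is the Space's app.py and its requirements,
# e.g. gradio and smolagents, are installed): `python app.py`, then open the
# printed local URL.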