File size: 1,946 Bytes
55b2cb1
2cf117f
55b2cb1
2cf117f
 
 
55b2cb1
2cf117f
 
 
 
 
 
 
 
 
 
 
 
 
55b2cb1
 
 
2cf117f
 
 
55b2cb1
 
 
2cf117f
 
55b2cb1
 
 
 
 
2cf117f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55b2cb1
2cf117f
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
from fastapi import FastAPI, Form
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from PIL import Image
from io import BytesIO
import base64
import torch

from transformers import Qwen2VLProcessor
from gui_actor.modeling import Qwen2VLForConditionalGenerationWithPointer
from gui_actor.inference import inference

app = FastAPI()

# Load the GUI-Actor grounding model once at import time so every request
# reuses the same weights. All three globals below are read by the
# /click/base64 endpoint.
model_name = "microsoft/GUI-Actor-2B-Qwen2-VL"
processor = Qwen2VLProcessor.from_pretrained(model_name)
tokenizer = processor.tokenizer  # shared tokenizer pulled from the processor
model = Qwen2VLForConditionalGenerationWithPointer.from_pretrained(
    model_name,
    torch_dtype=torch.float32,  # use float32 for CPU
    device_map=None,            # don't map to cuda
    attn_implementation=None,   # let transformers pick the default attention backend
).eval()


class Base64Request(BaseModel):
    """Request body for /click/base64.

    image_base64 may be a raw base64 string or a full data URL
    ("data:image/png;base64,..."); the endpoint strips any data-URL
    prefix before decoding. instruction is the natural-language task
    describing where to click.
    """

    image_base64: str
    instruction: str


@app.post("/click/base64")
def predict_click_base64(data: Base64Request):
    """Predict a click location for *data.instruction* on the given screenshot.

    Declared as a plain ``def`` (not ``async def``) on purpose: model
    inference is blocking and CPU-bound, so FastAPI must run it in its
    threadpool rather than on the event loop, which an ``async def``
    body would stall for every concurrent request.

    Returns JSON ``{"x": float, "y": float}`` — the top-1 predicted
    click point, rounded to 4 decimals (presumably normalized [0, 1]
    coordinates from the pointer head — confirm against gui_actor docs).
    Returns HTTP 400 when the payload is not decodable base64 or not a
    valid image.
    """
    # Accept both raw base64 and data URLs ("data:image/png;base64,...")
    # by keeping only the part after the last comma.
    try:
        image_data = base64.b64decode(data.image_base64.split(",")[-1])
        pil_image = Image.open(BytesIO(image_data)).convert("RGB")
    except (ValueError, OSError):
        # binascii.Error (bad base64) is a ValueError subclass;
        # PIL raises UnidentifiedImageError (an OSError) on non-image bytes.
        return JSONResponse(
            status_code=400,
            content={"error": "image_base64 is not a valid base64-encoded image"},
        )

    conversation = [
        {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    "text": "You are a GUI agent. You are given a task and a screenshot of the screen. You need to perform a series of pyautogui actions to complete the task.",
                }
            ]
        },
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": pil_image,
                },
                {
                    "type": "text",
                    "text": data.instruction,
                },
            ],
        },
    ]

    # topk=3 is requested but only the best point is returned to the client.
    pred = inference(conversation, model, tokenizer, processor, use_placeholder=True, topk=3)
    px, py = pred["topk_points"][0]
    return JSONResponse(content={"x": round(px, 4), "y": round(py, 4)})