import base64
import io

from openai import OpenAI
from PIL import Image

from ...data_processing import tensor_to_pil, pil_to_tensor


class GPTImage:
    """Round-trips images through OpenAI's gpt-image-1 edit endpoint, asking the
    model to reproduce each input image as faithfully as possible."""

    def __init__(self, data_params):
        self.client = OpenAI(organization="org-xZTnLOf1k9s04LEoKKjl4jOB")
        self.prompt = (
            "Please recreate the exact same image without any alterations. "
            "Please preserve the original resolution (1024*1024)."
        )
        self.data_params = data_params

    def eval(self):
        # No-op: present so the wrapper can be used where a torch module's
        # eval() is expected.
        pass

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)

    def forward(self, input):
        results = []
        for image in input:
            # Serialize each input tensor as an in-memory PNG for upload.
            image = tensor_to_pil(image, **self.data_params)
            buffer = io.BytesIO()
            image.save(buffer, format="PNG")
            buffer.seek(0)
            image_file = ("image.png", buffer, "image/png")

            try:
                # Ask gpt-image-1 to reconstruct the image at 1024x1024.
                result = self.client.images.edit(
                    model="gpt-image-1",
                    image=image_file,
                    prompt=self.prompt,
                    n=1,
                    size="1024x1024",
                )
                # The response carries the generated image as base64-encoded PNG data.
                image_base64 = result.data[0].b64_json
                image_bytes = base64.b64decode(image_base64)
                image = Image.open(io.BytesIO(image_bytes))
                results.append(pil_to_tensor(image, **self.data_params))
            except Exception as e:
                # Keep the output aligned with the input batch by recording a None
                # for any image that failed to round-trip.
                print("💥 Unexpected error occurred:", e)
                results.append(None)

        return results, None, None
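

# Minimal usage sketch (an assumption, not part of the original module): the
# keys expected in `data_params` are defined by `tensor_to_pil` / `pil_to_tensor`
# in `data_processing` and are not shown here, so an empty dict is used as a
# placeholder. Running this also requires valid OpenAI credentials for the
# hardcoded organization.
#
#     import torch
#
#     model = GPTImage(data_params={})
#     batch = torch.rand(2, 3, 1024, 1024)   # two dummy 1024x1024 RGB images
#     outputs, _, _ = model(batch)            # tensors, or None where the API call failed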