import os
import cv2
import torch
import warnings
import numpy as np
import gradio as gr
import paddlehub as hub
from PIL import Image
from methods.img2pixl import pixL
from examples.pixelArt.combine import combine
from methods.media import Media

warnings.filterwarnings("ignore")

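# Load the models once at start-up: U2Net (PaddleHub) provides the foreground/mask
# segmentation, and AnimeGAN2's face2paint stylises the input image before it is
# pixelated.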
U2Net = hub.Module(name='U2Net')
device = "cuda" if torch.cuda.is_available() else "cpu"
face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", device=device, size=512)
model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", device=device).eval()


def initilize(media, pixel_size, checkbox1):
    # Author: Alican Akca
    if media.name.endswith('.gif'):
        # GIFs are split into frames and pixelated frame by frame.
        return Media().split(media.name, pixel_size, 'gif')
    # elif media.name.endswith('.mp4'):
    #     return None  # Media().split(media.name, pixel_size, "video")
    else:
        # Stylise the still image with AnimeGAN2, then swap the colour channels
        # for the OpenCV / PaddleHub pipeline.
        media = Image.open(media.name).convert("RGB")
        media = cv2.cvtColor(np.asarray(face2paint(model, media)), cv2.COLOR_BGR2RGB)
        if checkbox1:
            # Object-oriented inference: segment the subject with U2Net, pixelate
            # the foreground and its mask, then recombine them with the background.
            result = U2Net.Segmentation(images=[media],
                                        paths=None,
                                        batch_size=1,
                                        input_size=320,
                                        output_dir='output',
                                        visualization=True)
            result = combine().combiner(images=pixL().toThePixL([result[0]['front'][:, :, ::-1],
                                                                 result[0]['mask']],
                                                                pixel_size),
                                        background_image=media)
        else:
            # Pixelate the whole frame.
            result = pixL().toThePixL([media], pixel_size)
        result = Image.fromarray(result)
        result.save('cache.png')
        # One value per output component: the preview image and the downloadable file.
        return [result, 'cache.png']

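# Gradio UI: a media upload, a slider for the pixel size, and a checkbox that
# toggles the U2Net-based object-oriented inference.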
inputs = [gr.File(label="Media"),
          gr.Slider(4, 100, value=12, step = 2, label="Pixel Size"),
          gr.Checkbox(label="Object-Oriented Inference", value=False)]

outputs = [gr.Image(label="Pixed image"),
           gr.File(label="Download")]

title = "ํ”ฝ์„ธ๋ผ : ๊ทธ๋ฆผ์„ ํ”ฝ์…€์•„ํŠธ๋กœ ๋งŒ๋“œ์„ธ์š”"  # "Pixera: turn your pictures into pixel art"
description = """ํ˜„์žฌ๋Š” ์‚ฌ์ง„๋งŒ ๊ฐ€๋Šฅํ•ฉ๋‹ˆ๋‹ค."""  # "Currently only photos are supported."

gr.Interface(fn=initilize,
             inputs=inputs,
             outputs=outputs,
             title=title,
             description=description).launch()