awacke1 committed
Commit 5493191 · 1 Parent(s): b46e241

Create app.py

Files changed (1)
  app.py +166 -0
app.py ADDED
@@ -0,0 +1,166 @@
+ import gradio as gr
+ import numpy as np
+ from huggingface_hub import hf_hub_url, cached_download
+ import PIL.Image  # import the submodule explicitly so PIL.Image.fromarray resolves
+ import onnx
+ import onnxruntime
+
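+ # Download the ONNX model weights from the Hugging Face Hub. cached_download
+ # keeps a local copy, so the file is fetched only once per machine. Note:
+ # recent huggingface_hub releases deprecate cached_download; hf_hub_download(
+ # "Jacopo/ToonClip", filename="model.onnx") is the modern one-call equivalent.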
+ config_file_url = hf_hub_url("Jacopo/ToonClip", filename="model.onnx")
+ model_file = cached_download(config_file_url)
+
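+ # Verify the downloaded graph is structurally valid before serving it.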
+ onnx_model = onnx.load(model_file)
+ onnx.checker.check_model(onnx_model)
+
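+ # Create the inference session; intra_op_num_threads bounds how many threads
+ # ONNX Runtime may use to parallelize a single operator.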
+ opts = onnxruntime.SessionOptions()
+ opts.intra_op_num_threads = 16
+ ort_session = onnxruntime.InferenceSession(model_file, sess_options=opts)
+
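+ # Look up the graph's input/output tensor names once so each call to
+ # ort_session.run can reference them directly.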
+ input_name = ort_session.get_inputs()[0].name
+ output_name = ort_session.get_outputs()[0].name
+
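+ # Per-channel normalization, x = (x - mean) / std, applied over the channel
+ # axis of either an NCHW batch or a single CHW image.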
+ def normalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
+     # x = (x - mean) / std
+     x = np.asarray(x, dtype=np.float32)
+     if len(x.shape) == 4:
+         for dim in range(3):
+             x[:, dim, :, :] = (x[:, dim, :, :] - mean[dim]) / std[dim]
+     if len(x.shape) == 3:
+         for dim in range(3):
+             x[dim, :, :] = (x[dim, :, :] - mean[dim]) / std[dim]
+
+     return x
+
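+ # Inverse of normalize, x = (x * std) + mean, used to map the network output
+ # back to the 0..1 range before rescaling to uint8.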
+ def denormalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
+     # x = (x * std) + mean
+     x = np.asarray(x, dtype=np.float32)
+     if len(x.shape) == 4:
+         for dim in range(3):
+             x[:, dim, :, :] = (x[:, dim, :, :] * std[dim]) + mean[dim]
+     if len(x.shape) == 3:
+         for dim in range(3):
+             x[dim, :, :] = (x[dim, :, :] * std[dim]) + mean[dim]
+
+     return x
+
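+ # Full inference pipeline: PIL image -> normalized float32 NCHW tensor ->
+ # ONNX Runtime -> denormalized uint8 HWC RGB image.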
+ def nogan(input_img):
+     # HWC uint8 image -> float32 NCHW tensor in [0, 1], normalized with
+     # ImageNet statistics
+     i = np.asarray(input_img)
+     i = i.astype("float32")
+     i = np.transpose(i, (2, 0, 1))
+     i = np.expand_dims(i, 0)
+     i = i / 255.0
+     i = normalize(i, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+
+     # Run the model; take the first output tensor and drop the batch dimension
+     ort_outs = ort_session.run([output_name], {input_name: i})
+     output = ort_outs[0][0]
+
+     # Denormalize, rescale to 0..255, and convert back to an HWC RGB image
+     output = denormalize(output, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+     output = output * 255.0
+     output = output.astype('uint8')
+     output = np.transpose(output, (1, 2, 0))
+     output_image = PIL.Image.fromarray(output, 'RGB')
+
+     return output_image
+
+ title = "ToonClip Comics Hero Demo"
+ description = """
+ Gradio demo for ToonClip, a UNet++ network with a MobileNet v3 backbone, optimized for mobile frameworks and trained with a VGG perceptual feature loss using PyTorch Lightning.
+ To use it, simply upload an image with a face or choose an example from the list below.
+ """
+ article = """
+ <style>
+ .boxes{
+     width:50%;
+     float:left;
+ }
+ #mainDiv{
+     width:50%;
+     margin:auto;
+ }
+ img{
+     max-width:100%;
+ }
+ </style>
+ <p style='text-align: center'>The "ToonClip" model was trained by <a href='https://twitter.com/JacopoMangia' target='_blank'>Jacopo Mangiavacchi</a> and is available in the GitHub repo <a href='https://github.com/jacopomangiavacchi/ComicsHeroMobileUNet' target='_blank'>ComicsHeroMobileUNet</a></p>
+ <p style='text-align: center'>The "Comics Hero dataset" used to train this model was produced by <a href='https://linktr.ee/Norod78' target='_blank'>Doron Adler</a> and is available in the GitHub repo <a href='https://github.com/Norod/U-2-Net-StyleTransfer' target='_blank'>Comics hero U2Net</a></p>
+ <p style='text-align: center'>The "ToonClip" iOS app, which uses a CoreML version of this model, is available on the Apple App Store as <a href='https://apps.apple.com/us/app/toonclip/id1536285338' target='_blank'>ToonClip</a></p>
+ <p style='text-align: center'>Blog post on <a href='https://medium.com/@JMangia/optimize-a-face-to-cartoon-style-transfer-model-trained-quickly-on-small-style-dataset-and-50594126e792' target='_blank'>Medium</a></p>
+ <br>
+ <p style='text-align: center'>Example images from the untrained FFHQ validation set:</p>
+ <div id='mainDiv'>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i01.jpeg' alt='Example01'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o01.png' alt='Output01'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i02.jpeg' alt='Example02'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o02.png' alt='Output02'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i03.jpeg' alt='Example03'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o03.png' alt='Output03'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i04.jpeg' alt='Example04'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o04.png' alt='Output04'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i05.jpeg' alt='Example05'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o05.png' alt='Output05'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i06.jpeg' alt='Example06'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o06.png' alt='Output06'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i07.jpeg' alt='Example07'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o07.png' alt='Output07'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i08.jpeg' alt='Example08'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o08.png' alt='Output08'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i09.jpeg' alt='Example09'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o09.png' alt='Output09'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i10.jpeg' alt='Example10'/></div>
+ <div class='boxes'><img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o10.png' alt='Output10'/></div>
+ </div>
+ """
+ examples = [['i01.jpeg'], ['i02.jpeg'], ['i03.jpeg'], ['i04.jpeg'], ['i05.jpeg'], ['i06.jpeg'], ['i07.jpeg'], ['i08.jpeg'], ['i09.jpeg'], ['i10.jpeg']]
+
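+ # Wire the pipeline into a Gradio Interface; the legacy gr.inputs.Image
+ # component crops/resizes uploads to the given shape (1024x1024) before they
+ # reach nogan.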
+ iface = gr.Interface(
+     nogan,
+     gr.inputs.Image(type="pil", shape=(1024, 1024)),
+     gr.outputs.Image(type="pil"),
+     title=title,
+     description=description,
+     article=article,
+     examples=examples)
+
+ iface.launch()