File size: 3,881 Bytes
a20719a
006270c
21a2ce4
6123f94
 
 
07a6275
92ed922
 
 
 
96c8c19
 
07a6275
 
 
 
 
 
27f73fa
07a6275
1b3ea2c
07a6275
1b3ea2c
 
 
07a6275
 
6a2fa62
07a6275
6a2fa62
642303e
07a6275
 
 
642303e
 
 
 
07a6275
642303e
 
 
07a6275
 
642303e
07a6275
642303e
07a6275
 
 
642303e
 
 
 
 
 
 
07a6275
642303e
 
 
 
07a6275
 
812e077
 
 
07a6275
642303e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
07a6275
642303e
 
 
 
 
 
c3cd455
642303e
96c8c19
18e65bf
1306203
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import streamlit as st
import base64
import os.path
import cv2
import random
       

os.system("mkdir _input")
os.system("mkdir _output")
os.system("mkdir _outputf")
os.system("ls")


title = "Melhoria de imagens"
st.title(title)
os.system("ls")
description = "Sistema para automação。"
st.header(description)
article = "<p style='text-align: center'><a href='https://huggingface.co/spaces/akhaliq/GFPGAN/' target='_blank'>clone from akhaliq@huggingface with little change</a> | <a href='https://github.com/TencentARC/GFPGAN' target='_blank'>GFPGAN Github Repo</a></p><center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_GFPGAN' alt='visitor badge'></center>"
#-s 720x1280 

load = st.checkbox("load")

if load:
          percent_complete=0
          my_bar = st.progress(0)   
          percent_complete= percent_complete+ 10
          my_bar.progress(percent_complete )    
       #exec=True 
          st.write("ffmpeg separando imagens")
       #if not os.path.isfile("./_input/imagem-0001.png"):
          os.system("ffmpeg -i vivi.mp4   -compression_level 10 -pred mixed -pix_fmt rgb24 -sws_flags +accurate_rnd+full_chroma_int -s 1480x2560  -r 30 ./_input/imagem-%4d.png")
          percent_complete= percent_complete+ 30
          my_bar.progress(percent_complete )   
          st.write("testando imagem")
          input_img = cv2.imread("./_input/imagem-0002.png" , cv2.IMREAD_COLOR)
          input_img= cv2.cvtColor(input_img,cv2.COLOR_BGR2RGB) 
          
          st.image(input_img)
          
          os.system("ls ./_input") 
          if 'myVar' not in globals():
              myVar=""
          st.write("melhorando faces")
          with st.spinner('Wait for it...'):
             # os.system("pip install git+https://github.com/TencentARC/GFPGAN.git")
                    os.system("python3 inference_gfpgan.py -i _input -o _output -v 1.3 -s 2")
          
          percent_complete= percent_complete+ 30
          my_bar.progress(percent_complete )   
         
          os.system("ls ./_output")
          os.system("echo ----")
          os.system("ls ./_output/cmp")
          os.system("echo ----")
          os.system("ls ./_output/restored_imgs")
          os.system("echo ----")
#          s 1480x2560
          st.write("recompilando video")
          #ffmpeg -r 60 -f image2 -s 1920x1080 -i _output/restored_imgs/imagem-%4d.png   -pix_fmt yuv420p ./videoSaida/output.mp4
          os.system("ffmpeg -y -r 30 -f image2  -i _output/restored_imgs/imagem-%4d.png   -pix_fmt yuv420p ./videoSaida/output.mp4")
          os.system("ls ./videoSaida")
          #st.video("./videoSaida/output.mp4" )
          st.write("preparando para download do video")
          percent_complete= percent_complete+ 30
          my_bar.progress(percent_complete ) 
          with open("./videoSaida/output.mp4", "rb") as file:
              st.video(file)

              btn = st.download_button(
          
                      label="Download video",
          
                      data=file,
          
                      file_name="output.png",
          
                      mime="video/mp4"
          
                    )
          #st.download_button("download video", data, file_name='output.mp4', mime='video/mp4',)
          #stremio
          #input_img = cv2.imread("./_output/cmp/imagem-0001_0000.png" , cv2.IMREAD_COLOR)
          #input_img = cv2.imread("./_output/cmp/imagem-0001_0000.png" , cv2.IMREAD_COLOR)
      
          st.write("demonstrando imagem restaurada")
          input_img = cv2.imread("./_output/restored_imgs/imagem-0002.png" , cv2.IMREAD_COLOR)
          input_img= cv2.cvtColor(input_img,cv2.COLOR_BGR2RGB) 
          
          st.image(input_img)
          exec=False
       #return Image.fromarray(restored_faces[0][:,:,::-1])
       
   
#st.button('Imagem',on_click=inference)
exec=True