Update app.py
app.py CHANGED
@@ -1,87 +1,58 @@
-import gradio as gr
…
-)
…
-logging.
…
-        caption_ko = ""
-    logging.info(f"[DEBUG] caption_ko: {caption_ko}")
-    print(f"[DEBUG] caption_ko: {caption_ko}")
-
-    if not caption_ko:
-        return "Could not generate a description for the image.", None
-
-    # ③ TTS synthesis
-    try:
-        sr, wav = synthesize_tts(caption_ko)
-        return caption_ko, (sr, wav)
-    except Exception as e:
-        logging.error(f"[ERROR] TTS error: {e}")
-        return caption_ko, None
-
-# ─────────────── 5. Gradio interface ───────────────
-demo = gr.Interface(
-    fn=describe_and_speak,
-    inputs=gr.Image(type="pil", label="Input image"),
-    outputs=[
-        gr.Textbox(label="Korean caption"),
-        gr.Audio(label="Audio playback", type="numpy")
-    ],
-    title="Image → Korean caption & speech conversion",
-    description="Generate an English caption with BLIP → translate to Korean with NLLB → synthesize speech with VITS"
-)
-
-if __name__ == "__main__":
-    demo.launch(debug=True)
+import gradio as gr
+import logging
+from PIL import Image
+from transformers import (
+    BlipProcessor,
+    BlipForConditionalGeneration,
+    pipeline,
+    AutoTokenizer,
+    VitsModel
+)
+import torch
+
+# ─────────────── Logging setup ───────────────
+logging.basicConfig(level=logging.INFO)
+
+# ─────────────── 1. BLIP image captioner (generates English) ───────────────
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+# ─────────────── 2. English→Korean translation: NLLB pipeline ───────────────
+translation_pipeline = pipeline(
+    "translation",
+    model="facebook/nllb-200-distilled-600M",
+    src_lang="eng_Latn",
+    tgt_lang="kor_Hang",
+    max_length=200
+)
+
+# ─────────────── 3. Korean TTS: direct VITS loading ───────────────
+tts_model = VitsModel.from_pretrained("facebook/mms-tts-kor")
+tts_tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-kor")
+tts_model.to("cuda" if torch.cuda.is_available() else "cpu")
+
+def synthesize_tts(text: str):
+    inputs = tts_tokenizer(text, return_tensors="pt")
+    input_ids = inputs["input_ids"].to(tts_model.device)  # ← fix: use LongTensor only
+    with torch.no_grad():
+        output = tts_model(input_ids=input_ids)
+    waveform = output.waveform.squeeze().cpu().numpy()
+    return (tts_model.config.sampling_rate, waveform)
+
+# ─────────────── 4. Image → caption + translation + speech output ───────────────
+def describe_and_speak(img: Image.Image):
+    logging.info("[DEBUG] describe_and_speak called")
+
+    # ① Generate the English caption
+    pixel_values = processor(images=img, return_tensors="pt").pixel_values
+    generated_ids = blip_model.generate(pixel_values, max_length=64)
+    caption_en = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+    logging.info(f"[DEBUG] caption_en: {caption_en}")
+    print(f"[DEBUG] caption_en: {caption_en}")
+
+    # ② Translate
+    try:
+        result = translation_pipeline(caption_en)
+        caption_ko = result[0]['translation_text'].strip()
+    except Exception as e:
+        logging.error(f"[ERROR] translation error: {e}")
+        caption_ko = ""
+    logging.info(f"[DEBUG] caption_ko: {caption_ko}")
+    print(f"[DEBUG] caption_ko: {caption_ko}")
+
+    if not caption_ko:
+        return "Could not generate a description for the image.", None
+
+    # ③ TTS synthesis
+    try:
+        sr, wav = synthesize_tts(caption_ko)
+        return caption_ko, (sr, wav)
+    except Exception as e:
+        logging.error(f"[ERROR] TTS error: {e}")
+        return caption_ko, None
+
+# ─────────────── 5. Gradio interface ───────────────
+demo = gr.Interface(
+    fn=describe_and_speak,
+    inputs=gr.Image(type="pil", sources=["upload", "camera"], label="Input image"),
+    outputs=[
+        gr.Textbox(label="Korean caption"),
+        gr.Audio(label="Audio playback", type="numpy")
+    ],
+    title="Image → Korean caption & speech conversion",
+    description="Generate an English caption with BLIP → translate to Korean with NLLB → synthesize speech with VITS"
+)
+
+if __name__ == "__main__":
+    demo.launch(debug=True)
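A quick way to sanity-check the updated pipeline outside the Space is to call describe_and_speak directly. The snippet below is a minimal sketch, not part of the commit: it assumes the new file is saved as app.py next to the test script and that the three model checkpoints can be downloaded on first run (the import itself loads BLIP, NLLB, and VITS, so it is slow the first time).

# Minimal local smoke test (a sketch; assumes the file above is saved as app.py)
from PIL import Image

from app import describe_and_speak  # importing app.py loads all three models

# Any RGB image works; a solid-color placeholder avoids needing a test asset.
img = Image.new("RGB", (224, 224), color=(120, 180, 240))

caption_ko, audio = describe_and_speak(img)
print("caption:", caption_ko)
if audio is not None:
    sr, wav = audio  # sampling rate comes from tts_model.config.sampling_rate
    print(f"audio: {len(wav)} samples at {sr} Hz")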