Upload 2 files

- app.py +87 -0
- requirements.txt +0 -0
app.py
ADDED
@@ -0,0 +1,87 @@
import gradio as gr
import logging
from PIL import Image
from transformers import (
    BlipProcessor,
    BlipForConditionalGeneration,
    pipeline,
    AutoTokenizer,
    VitsModel
)
import torch

# ─────────────── Logging setup ───────────────
logging.basicConfig(level=logging.INFO)

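# All three checkpoints below are downloaded from the Hugging Face Hub on
# first launch and served from the local cache afterwards.
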
# ─────────────── 1. BLIP image captioning (English) ───────────────
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

# ─────────────── 2. English→Korean translation: NLLB pipeline ───────────────
translation_pipeline = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    src_lang="eng_Latn",
    tgt_lang="kor_Hang",
    max_length=200
)

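# Note: NLLB addresses languages by FLORES-200 codes, so "eng_Latn" is
# English in Latin script and "kor_Hang" is Korean in Hangul script.
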
# ─────────────── 3. Korean TTS: loading VITS directly ───────────────
tts_model = VitsModel.from_pretrained("facebook/mms-tts-kor")
tts_tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-kor")

tts_model.to("cuda" if torch.cuda.is_available() else "cpu")

def synthesize_tts(text: str):
    inputs = tts_tokenizer(text, return_tensors="pt").to(tts_model.device)
    with torch.no_grad():
        output = tts_model(**inputs)
    waveform = output.waveform.squeeze().cpu().numpy()
    return (tts_model.config.sampling_rate, waveform)

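# Note: gr.Audio(type="numpy") accepts a (sample_rate, numpy_array) tuple,
# which is exactly the shape synthesize_tts returns. Only the TTS model is
# moved to GPU above; BLIP and the NLLB pipeline run on CPU as written.
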
# ─────────────── 4. Image → caption + translation + speech output ───────────────
def describe_and_speak(img: Image.Image):
    logging.info("[DEBUG] describe_and_speak called")

    # ① Generate the English caption
    pixel_values = processor(images=img, return_tensors="pt").pixel_values
    generated_ids = blip_model.generate(pixel_values, max_length=64)
    caption_en = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    logging.info(f"[DEBUG] caption_en: {caption_en}")
    print(f"[DEBUG] caption_en: {caption_en}")

    # ② Translate to Korean
    try:
        result = translation_pipeline(caption_en)
        caption_ko = result[0]['translation_text'].strip()
    except Exception as e:
        logging.error(f"[ERROR] translation error: {e}")
        caption_ko = ""
    logging.info(f"[DEBUG] caption_ko: {caption_ko}")
    print(f"[DEBUG] caption_ko: {caption_ko}")

    if not caption_ko:
        return "Could not generate a description for this image.", None

    # ③ Synthesize speech
    try:
        sr, wav = synthesize_tts(caption_ko)
        return caption_ko, (sr, wav)
    except Exception as e:
        logging.error(f"[ERROR] TTS error: {e}")
        return caption_ko, None

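# The two return values map onto the Textbox and Audio components declared
# below; returning None in the audio slot simply leaves the player empty
# when translation or synthesis fails.
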
# ─────────────── 5. Gradio interface ───────────────
demo = gr.Interface(
    fn=describe_and_speak,
    inputs=gr.Image(type="pil", label="Input image"),
    outputs=[
        gr.Textbox(label="Korean caption"),
        gr.Audio(label="Audio playback", type="numpy")
    ],
    title="Image → Korean Caption & Speech",
    description="Generate an English caption with BLIP → translate to Korean with NLLB → synthesize speech with VITS"
)

if __name__ == "__main__":
    demo.launch(debug=True)
requirements.txt
ADDED
Binary file (126 Bytes).
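Assuming requirements.txt pins gradio, transformers, torch, and pillow (its exact contents are not rendered above), the pipeline can be smoke-tested locally without the web UI. A minimal sketch, with a placeholder image path:

    from PIL import Image
    from app import describe_and_speak

    caption_ko, audio = describe_and_speak(Image.open("sample.jpg"))  # placeholder path
    print(caption_ko)  # Korean caption, or the fallback message
    if audio is not None:
        sr, wav = audio  # sample rate plus mono waveform as a NumPy array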