# imagetoaudio / app.py — Hugging Face Space entry point
# (image → caption → MusicGen audio pipeline)
import os, sys, types, subprocess, tempfile
import torch, gradio as gr
from transformers import (
VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
)
from PIL import Image
# ─────────────────────────────────────────────────────────────
# 0. ν™˜κ²½ λ³€μˆ˜
# ─────────────────────────────────────────────────────────────
os.environ["HF_FORCE_SAFE_SERIALIZATION"] = "1"
os.environ["XFORMERS_FORCE_DISABLE"] = "1" # audiocraft λ‚΄λΆ€ ν”Œλž˜κ·Έ
# ─────────────────────────────────────────────────────────────
# 1. xformers 더미 λͺ¨λ“ˆ μ£Όμž… (GPU 쒅속 제거)
# ─────────────────────────────────────────────────────────────
dummy = types.ModuleType("xformers")
dummy.__version__ = "0.0.0"
ops = types.ModuleType("xformers.ops")
def _fake_mea(q, k, v, *_, dropout_p: float = 0.0, **__):
return torch.nn.functional.scaled_dot_product_attention(
q, k, v, dropout_p=dropout_p, is_causal=False
)
class _FakeLowerTriangularMask: # audiocraftκ°€ 쑴재 μ—¬λΆ€λ§Œ 확인
pass
ops.memory_efficient_attention = _fake_mea
ops.LowerTriangularMask = _FakeLowerTriangularMask
dummy.ops = ops
sys.modules["xformers"] = dummy
sys.modules["xformers.ops"] = ops
# ─────────────────────────────────────────────────────────────
# 2. Stub only the optional audio-metric modules that are NOT installed.
#    Modules already provided by requirements.txt (librosa, av, ...)
#    are deliberately left untouched.
# ─────────────────────────────────────────────────────────────
for _missing in ("pesq", "pystoi", "soxr"):  # keep list minimal
    sys.modules.setdefault(_missing, types.ModuleType(_missing))
# ─────────────────────────────────────────────────────────────
# 3. Import audiocraft (MusicGen), installing it on first run if missing
# ─────────────────────────────────────────────────────────────
def _pip_install(*args: str) -> None:
    """Run `pip install <args>` with the current interpreter; raise on failure."""
    subprocess.check_call([sys.executable, "-m", "pip", "install", *args])

try:
    from audiocraft.models import MusicGen
    from audiocraft.data.audio import audio_write
except ModuleNotFoundError:
    # Install audiocraft without its heavy dependency tree, then the
    # handful of runtime deps it actually needs, and retry the import.
    _pip_install(
        "git+https://github.com/facebookresearch/audiocraft@main",
        "--no-deps", "--use-pep517",
    )
    _pip_install("encodec", "librosa", "av", "torchdiffeq",
                 "torchmetrics", "num2words")
    from audiocraft.models import MusicGen
    from audiocraft.data.audio import audio_write
# ─────────────────────────────────────────────────────────────
# 4. Image-captioning model (ViT encoder + GPT-2 decoder)
# ─────────────────────────────────────────────────────────────
_CAPTION_MODEL_ID = "nlpconnect/vit-gpt2-image-captioning"

caption_model = VisionEncoderDecoderModel.from_pretrained(
    _CAPTION_MODEL_ID,
    use_safetensors=True,
    low_cpu_mem_usage=False,  # disable meta-device loading
    device_map=None,          # no automatic Accelerate sharding
)
caption_model.eval()  # inference mode (eval() returns the model in-place)

feature_extractor = ViTImageProcessor.from_pretrained(_CAPTION_MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(_CAPTION_MODEL_ID)
# ─────────────────────────────────────────────────────────────
# 5. MusicGen model
# ─────────────────────────────────────────────────────────────
# Smallest public MusicGen checkpoint; weights are fetched from the HF hub.
musicgen = MusicGen.get_pretrained("facebook/musicgen-small")
# Every generated clip will be 10 seconds long.
musicgen.set_generation_params(duration=10)
# ─────────────────────────────────────────────────────────────
# 6. νŒŒμ΄ν”„λΌμΈ ν•¨μˆ˜
# ─────────────────────────────────────────────────────────────
def generate_caption(image: Image.Image) -> str:
    """Return an English caption for *image* from the ViT-GPT2 model."""
    # Preprocessing produces plain tensors; only generation needs no_grad.
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        token_ids = caption_model.generate(inputs.pixel_values, max_length=50)
    return tokenizer.decode(token_ids[0], skip_special_tokens=True)
def generate_music(prompt: str) -> str:
    """Generate a ~10 s music clip for *prompt* and return the WAV file path.

    Bug fix: audiocraft's ``audio_write`` takes a path *stem* (without
    extension) and appends ``".wav"`` itself.  The old code passed
    ``"musicgen.wav"`` — the file on disk became ``musicgen.wav.wav`` while
    the returned path pointed at a file that did not exist.
    """
    wav = musicgen.generate([prompt])  # batch size = 1
    stem = os.path.join(tempfile.mkdtemp(), "musicgen")
    # Move to CPU before writing (no-op on CPU; required if model ran on GPU).
    audio_write(stem, wav[0].cpu(), musicgen.sample_rate, strategy="loudness")
    return f"{stem}.wav"
def process(image: Image.Image):
    """Full pipeline: image → caption text → generated music file path."""
    description = generate_caption(image)
    audio_path = generate_music(f"A cheerful melody inspired by: {description}")
    return description, audio_path
# ─────────────────────────────────────────────────────────────
# 7. Gradio UI
# ─────────────────────────────────────────────────────────────
# One image in, two components out: the caption text and the rendered audio.
_outputs = [
    gr.Text(label="AIκ°€ μƒμ„±ν•œ κ·Έλ¦Ό μ„€λͺ…"),
    gr.Audio(label="μƒμ„±λœ AI μŒμ•… (MusicGen)"),
]
demo = gr.Interface(
    fn=process,
    inputs=gr.Image(type="pil"),
    outputs=_outputs,
    title="🎨 AI κ·Έλ¦Ό-μŒμ•… 생성기",
    description="그림을 μ—…λ‘œλ“œν•˜λ©΄ AIκ°€ μ„€λͺ…을 λ§Œλ“€κ³ , μ„€λͺ…을 λ°”νƒ•μœΌλ‘œ 10초 길이의 μŒμ•…μ„ 생성해 λ“€λ €μ€λ‹ˆλ‹€.",
)

if __name__ == "__main__":
    demo.launch()