import gradio as gr
from huggingface_hub import hf_hub_download
import subprocess
import tempfile
import shutil
import os
import glob
import spaces  # Hugging Face Spaces SDK (provides e.g. the @spaces.GPU decorator)
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Install flash-attn at startup. FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE skips compiling
# the CUDA extension; the full environment is passed through so pip stays on PATH.
subprocess.run('pip install flash-attn --no-build-isolation',
               env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
| """ | |
| def install_apex(): | |
| # Apex reposunu klonla (eğer zaten klonlanmamışsa) | |
| if not os.path.exists('apex'): | |
| subprocess.run(['git', 'clone', 'https://github.com/NVIDIA/apex'], check=True) | |
| # Apex dizinine git | |
| os.chdir('apex') | |
| # Apex'i kur | |
| cmd = [ | |
| 'pip', 'install', '-v', '--disable-pip-version-check', '--no-cache-dir', | |
| '--no-build-isolation', '--config-settings', '--build-option=--cpp_ext', | |
| '--config-settings', '--build-option=--cuda_ext', '.' | |
| ] | |
| subprocess.run(cmd, check=True) | |
| # Orjinal dizinine geri dön | |
| os.chdir('..') | |
| try: | |
| import apex | |
| except ModuleNotFoundError: | |
| print("Apex not found, installing...") | |
| install_apex() | |
| # Kurulumdan sonra apex'i import etmeyi dene | |
| import apex | |
| """ | |
def download_t5_model(model_id, save_directory):
    # Download the model and its tokenizer
    model = T5ForConditionalGeneration.from_pretrained(model_id)
    tokenizer = T5Tokenizer.from_pretrained(model_id)
    # Save both to the given directory
    os.makedirs(save_directory, exist_ok=True)
    model.save_pretrained(save_directory)
    tokenizer.save_pretrained(save_directory)

# Model ID and target directory for the T5 text encoder
model_id = "DeepFloyd/t5-v1_1-xxl"
save_directory = "pretrained_models/t5_ckpts/t5-v1_1-xxl"
# Download the model at startup
download_t5_model(model_id, save_directory)
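# A possible guard (an assumption, not part of the original app): the call above could
# be made conditional so Space restarts skip the multi-GB T5 fetch, e.g.
#   if not os.path.isdir(save_directory):
#       download_t5_model(model_id, save_directory)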
def download_model(repo_id, model_name):
    model_path = hf_hub_download(repo_id=repo_id, filename=model_name)
    return model_path
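# Example: download_model("hpcai-tech/Open-Sora", "OpenSora-v1-16x256x256.pth")
# returns the local path of the cached checkpoint file.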
def run_inference(model_name, prompt_text):
    repo_id = "hpcai-tech/Open-Sora"
    # Map each checkpoint to the inference configuration file used by the app
    config_mapping = {
        "OpenSora-v1-16x256x256.pth": "configs/opensora/inference/16x256x256.py",
        "OpenSora-v1-HQ-16x256x256.pth": "configs/opensora/inference/16x512x512.py",
        "OpenSora-v1-HQ-16x512x512.pth": "configs/opensora/inference/64x512x512.py"
    }
    config_path = config_mapping[model_name]
    ckpt_path = download_model(repo_id, model_name)

    # Save prompt_text to a temporary text file
    prompt_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode='w')
    prompt_file.write(prompt_text)
    prompt_file.close()

    # Rewrite the config so it points at the temporary prompt file
    with open(config_path, 'r') as file:
        config_content = file.read()
    config_content = config_content.replace('prompt_path = "./assets/texts/t2v_samples.txt"', f'prompt_path = "{prompt_file.name}"')
    with tempfile.NamedTemporaryFile('w', delete=False, suffix='.py') as temp_file:
        temp_file.write(config_content)
        temp_config_path = temp_file.name

    try:
        cmd = [
            "torchrun", "--standalone", "--nproc_per_node", "1",
            "scripts/inference.py", temp_config_path,
            "--ckpt-path", ckpt_path
        ]
        subprocess.run(cmd, check=True)

        save_dir = "./outputs/samples/"  # Output directory used by inference.py
        list_of_files = glob.glob(f'{save_dir}/*')
        if list_of_files:
            # Return the most recently created sample
            latest_file = max(list_of_files, key=os.path.getctime)
            return latest_file
        else:
            print("No files found in the output directory.")
            return None
    finally:
        # Clean up the temporary files
        os.remove(temp_config_path)
        os.remove(prompt_file.name)
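# Example invocation (assumes the Open-Sora repo layout, with scripts/inference.py and
# configs/ in the working directory):
#   video_path = run_inference("OpenSora-v1-16x256x256.pth", "A panda eating bamboo")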
def main():
    gr.Interface(
        fn=run_inference,
        inputs=[
            gr.Dropdown(choices=[
                "OpenSora-v1-16x256x256.pth",
                "OpenSora-v1-HQ-16x256x256.pth",
                "OpenSora-v1-HQ-16x512x512.pth"
            ],
                value="OpenSora-v1-16x256x256.pth",
                label="Model Selection"),
            gr.Textbox(label="Prompt Text", placeholder="Enter prompt text here")
        ],
        outputs=gr.Video(label="Output Video"),
        title="Open-Sora Inference",
        description="Run Open-Sora Inference with Custom Parameters",
    ).launch()

if __name__ == "__main__":
    main()
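# To try the app locally (a sketch; the entry-point filename `app.py` is assumed):
#   python app.py
# Gradio serves the UI on http://127.0.0.1:7860 by default.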