{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Copyright (C) 2025 NVIDIA Corporation. All rights reserved.\n",
"#\n",
"# This work is licensed under the LICENSE file\n",
"# located at the root directory.\n",
"\n",
"import torch\n",
"import random\n",
"from PIL import Image\n",
"\n",
"from visualization_utils import show_images\n",
"from addit_flux_pipeline import AdditFluxPipeline\n",
"from addit_flux_transformer import AdditFluxTransformer2DModel\n",
"from addit_scheduler import AdditFlowMatchEulerDiscreteScheduler\n",
"from addit_methods import add_object_real\n",
"\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"my_transformer = AdditFluxTransformer2DModel.from_pretrained(\"black-forest-labs/FLUX.1-dev\", subfolder=\"transformer\", torch_dtype=torch.bfloat16)\n",
"\n",
"pipe = AdditFluxPipeline.from_pretrained(\"black-forest-labs/FLUX.1-dev\", \n",
" transformer=my_transformer,\n",
" torch_dtype=torch.bfloat16).to(device)\n",
"pipe.scheduler = AdditFlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Reset the GPU memory tracking\n",
"torch.cuda.reset_max_memory_allocated(0)\n",
"\n",
"# source_image = Image.open(\"images/cat.jpg\").resize((1024, 1024))\n",
"# (prompt_src, prompt_tgt), subject_token = [\"A photo of a cat\", \"A photo of a cat wearing a scarf\"], \"scarf\"\n",
"\n",
"source_image = Image.open(\"images/bed_dark_room.jpg\").resize((1024, 1024))\n",
"(prompt_src, prompt_tgt), subject_token = [\"A photo of a bed in a dark room\", \"A photo of a dog lying on a bed in a dark room\"], \"dog\"\n",
"\n",
"random.seed(0)\n",
"seed_src = random.randint(0, 10000)\n",
"seeds_obj = [0,1,2]\n",
"\n",
"for seed_obj in seeds_obj:\n",
" images_list = add_object_real(pipe, source_image=source_image, prompt_source=prompt_src, prompt_object=prompt_tgt, \n",
" subject_token=subject_token, seed_src=seed_src, seed_obj=seed_obj, \n",
" extended_scale =1.1, structure_transfer_step=4, blend_steps = [18], #localization_model=\"attention\",\n",
" use_offset=False, show_attention=True, use_inversion=True, display_output=True)\n",
"\n",
"# Report maximum GPU memory usage in GB\n",
"max_memory_used = torch.cuda.max_memory_allocated(0) / (1024**3) # Convert to GB\n",
"print(f\"Maximum GPU memory used: {max_memory_used:.2f} GB\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "addit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}