codeShare committed
Commit b7fa894 · verified · Parent: 548669c

Upload Joycaption_Alpha_One.ipynb

Files changed (1)
  1. Joycaption_Alpha_One.ipynb +1 -1
Joycaption_Alpha_One.ipynb CHANGED
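The notebook is stored as a single JSON line, so the one-line diff below is difficult to scan. Comparing the two versions as far as they are shown here, the substantive change is the default sampling temperature in the generation-settings cell; the remaining differences are recorded execution counts and cell outputs. A hypothetical side-by-side of the changed default, for reference (temperature_old / temperature_new are illustrative names, not variables in the notebook):

    # Default sampling parameters of the generation-settings cell (old vs. new):
    temperature_old = 1.75  # value before this commit
    temperature_new = 1.35  # value after this commit
    top_p = 0.75            # unchanged in both versions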
@@ -1 +1 @@
- {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' # @param {type:'string'}\n","enable_custom_prompt = False # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," 
(\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del 
checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","source":["\n","\n","\n","# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' # @param {type:'string'}\n","enable_custom_prompt = True # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}"],"metadata":{"id":"_qrUZ7jRIxIf"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":2,"metadata":{"id":"PjO3Wc4kzR08","executionInfo":{"status":"ok","timestamp":1753382129877,"user_tz":-120,"elapsed":45,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.75 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 0.75 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","#-----#\n","num=1\n","#import torch\n","#import Image\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, prompt_str: str) -> str:\n"," torch.cuda.empty_cache()\n"," length =512\n"," #length = 
None if caption_length == \"any\" else caption_length\n"," #if isinstance(length, str):\n"," # try:\n"," # length = int(length)\n"," # except ValueError:\n"," # pass\n"," #if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," # caption_tone = \"formal\"\n"," #prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," #if prompt_key not in CAPTION_TYPE_MAP:\n"," # raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," #prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","source":["\n","%cd /content/\n","!unzip training_data.zip\n","\n","\n","\n","\n","\n","\n","\n"],"metadata":{"id":"c60a6jW-YwsN"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"mhccTDyzirVn"},"outputs":[],"source":["# @markdown Split the image into 20 parts prior to running\n","no_parts = 1 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found 
under /content/... \\n into {no_parts} along x-axis')\n","import os,math,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","split_folder = f'/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","#num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," #while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," #os.remove(f'{filename}')\n"," #continue\n"," image = Image.open(f\"{filename}\").convert('RGB')\n"," w,h=image.size\n"," #grid = product(range(0, h-h%d, d), range(0, w-w%d, d))\n"," divs=no_parts\n"," step=math.floor(w/divs)\n"," %cd {split_folder}\n"," for index in range(divs):\n"," box = (step*index, 0 ,step*(index+1),math.floor(1.0*h))\n"," image.crop(box).save(f'{num}_{index}.jpeg','JPEG')\n"," %cd /content/\n"," if os.path.exists(textpath):\n"," with open(f'{textpath}', 'r') as file:\n"," _tags = file.read()\n","\n"," print(_tags)\n"," if not _tags:continue\n"," tags=''\n"," _tags = [item.strip() for item in f'{_tags}'.split(',')]\n"," random.shuffle(_tags)\n"," for tag in _tags:\n"," tags = tags + tag + ','\n"," #----#\n"," tags = (tags + 'AAAA').replace(',AAAA','')\n"," print(tags)\n"," prompt_str = f'Please describe the image in 400 words using danbooru tags : {tags}'\n"," %cd {split_folder}\n"," f = open(f'{num}_{index}.txt','w')\n"," f.write(f'{prompt_str}')\n"," f.close()\n"," #---#\n"," #-----#\n"," #----#\n"," num = num+1\n"," #caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," #print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," #print(caption)\n"," #---------#\n"," #f = open(f\"{num}.txt\", \"w\")\n"," #f.write(f'{caption}')\n"," #f.close()\n"," #input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo","colab":{"base_uri":"https://localhost:8080/"},"outputId":"38b6b8c4-d322-4866-873e-a28573891d02"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n","20_0.jpeg\n","/content/split\n","Prompt: Please describe the image in 400 words using danbooru tags : blonde hair,english text,subtitled,headband,standing,male focus,shirt,3boys,retro artstyle,rifle,multiple boys,pants,indoors,1980s (style),red hair,brown hair,gun,short hair,1990s (style),smile,black hair,weapon,red jacket,holding gun,scene reference,holding,holding weapon,jacket,parody,sunglasses\n","...\n","\n","...caption for 20_0.jpeg\n","\n","...\n","movie still,short_cropped_hair,mouth_open,hung, and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\n","/content/tmp\n","22_0.jpeg\n","/content/split\n","Prompt: Please describe the image in 400 words using danbooru tags : fake screenshot,tiles,solo,holding,holding weapon,1boy,english text,male focus,parody,weapon,handgun,holding gun,gun,subtitled\n","...\n","\n","...caption for 22_0.jpeg\n","\n","...\n","An image from a computer animated game, presumably based 
on an image similar to those commonly seen in 1970s films, specifically The Italian Job. The viewpoint appears close and to the right side, possibly due to the character standing behind the camera. The characters in question appear similar to those in The Italian Job. To left there is a character’s outstretched right hand, with tan fingers holding a handgun; the background has shadows cast by a street, specifically a paved stone wall with rectangular stones set in gray. Text overlay with yellow font on dark, reading “dude, they got nice tilts in here too., and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\n","/content/tmp\n","15_0.jpeg\n","/content/split\n","Prompt: Please describe the image in 400 words using danbooru tags : parody,fake screenshot,holding,scene reference,holding gun,handgun,retro artstyle,1boy,brown jacket,male focus,pants,subtitled,english text,holding weapon,head out of frame,solo,jacket,gun,weapon\n","...\n","\n","...caption for 15_0.jpeg\n","\n","...\n","Two young men with East Asian features hold handguns with shotguns, wearing light grey or white pajamas and casual brown jackets under dark, menacing eyes as a menacing, foreboding caption \"We live in a big Middle Eastern Bazaar\" is seen beneath., and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\n","/content/tmp\n","13_0.jpeg\n","/content/split\n","Prompt: Please describe the image in 400 words using danbooru tags : solo,brown hair,close-up,fake screenshot,retro artstyle,1boy,english text,closed eyes,male focus,subtitled,1990s (style),headband\n","...\n","\n","...caption for 13_0.jpeg\n","\n","...\n","Brown hair parted to one side\n","From behind a girl's mug with a face and big, goofy eyes shut tight, and one nostril sneaked on. The top right bit, an upclose noggin, a headband, and some hair. There're a few short lines below for expression. She's fair-skinned, and there're a couple dark blotches or shading.\n","\n","Bold and cheeky text in yellow above says, “cockroaches on sticks!”\n","\n","Her forehead shows some dark marks; her eyes are squished with blackness for pupils. A single droplet of sweat near the nostril's edge gives out heat or stress. Brownish-orange hair sticks out in front with a thin, straight cut or a hairband keeping it down.\n","\n","The image is drawn in the classic cartoon and anime style, featuring simple shading and a few strong colors, with bright whites to highlight light from within. Background's kept light, so she shines without distractions. 
The pic has a playful and amusing tone, with bold colors making you chuckle from its offhand caption., and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\n","/content/tmp\n","7_0.jpeg\n","/content/split\n","Prompt: Please describe the image in 400 words using danbooru tags : retro artstyle,anime coloring,bed,on back,tank top,1girl,1980s (style),bangs,1990s (style),lying,covered nipples,on bed,solo,pillow,dark skin,blue eyes,smile,armpits,grin,arm up,breasts,medium breasts,short hair,dark-skinned female,black hair,upper body,brown hair,hair between eyes,english text,subtitled\n"]}],"source":["\n","import os,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","split_folder = '/content/split/'\n","src_folder = '/content/'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," prompt_str = 'Describe the image in 400 words and write the danbooru tags'\n"," if os.path.exists(f'{src_folder}{textpath}'):\n"," with open(f'{textpath}', 'r') as file:\n"," prompt_str = file.read()\n"," #prompt_str = f'Please improve this prompt : {tags}'\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, f'{prompt_str}')\n"," #caption = caption + \", and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\"\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')\n"," num = num+1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"5EztLCjkPq4U","colab":{"base_uri":"https://localhost:8080/","height":54},"executionInfo":{"status":"ok","timestamp":1753178892488,"user_tz":-120,"elapsed":109,"user":{"displayName":"","userId":""}},"outputId":"4c9d9def-26b4-4100-f180-ba0bd614ae02"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]},{"output_type":"execute_result","data":{"text/plain":["'/content/tmp.zip'"],"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"}},"metadata":{},"execution_count":44}],"source":["import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"]},{"cell_type":"code","source":["# @markdown Save images of all urls found in image_urls.txt to workspace\n","\n","!wget -i image_urls.txt -P ./splits\n","\n"],"metadata":{"id":"v9UMCh3h_mNj"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["# @markdown Auto-disconnect from Google Colab upon running this cell\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from 
runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753382381307},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753179095950},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753120703402},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
 
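The image-splitting cell in the same notebook builds the per-image prompt from a sidecar .txt file of comma-separated Booru tags: the tags are shuffled and substituted into a fixed instruction that the captioning cell later reads back. A minimal sketch of that step, assuming the sidecar file layout used by the notebook; build_prompt and the example file name are hypothetical:

    import random

    def build_prompt(tag_file: str) -> str:
        # Read the comma-separated tag list written next to the image.
        with open(tag_file, 'r') as f:
            tags = [t.strip() for t in f.read().split(',') if t.strip()]
        # The notebook shuffles the tags before composing the instruction.
        random.shuffle(tags)
        return f'Please describe the image in 400 words using danbooru tags : {",".join(tags)}'

    # Example with a hypothetical sidecar file: build_prompt('021.txt')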
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' # @param {type:'string'}\n","enable_custom_prompt = False # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.autocast_mode\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," 
(\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del 
checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","source":["\n","\n","\n","# @markdown Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' # @param {type:'string'}\n","enable_custom_prompt = True # @param {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n"," (\"descriptive\", \"formal\", False, True): [\"Write a descriptive caption for this image in a formal tone within {word_count} words.\"],\n"," (\"descriptive\", \"formal\", True, False): [\"Write a {length} descriptive caption for this image in a formal tone.\"],\n"," (\"descriptive\", \"informal\", False, False): [\"Write a descriptive caption for this image in a casual tone.\"],\n"," (\"descriptive\", \"informal\", False, True): [\"Write a descriptive caption for this image in a casual tone within {word_count} words.\"],\n"," (\"descriptive\", \"informal\", True, False): [\"Write a {length} descriptive caption for this image in a casual tone.\"],\n"," (\"training_prompt\", \"formal\", False, False): [\"Write a stable diffusion prompt for this image.\"],\n"," (\"training_prompt\", \"formal\", False, True): [\"Write a stable diffusion prompt for this image within {word_count} words.\"],\n"," (\"training_prompt\", \"formal\", True, False): [\"Write a {length} stable diffusion prompt for this image.\"],\n"," (\"rng-tags\", \"formal\", False, False): [\"Write a list of Booru tags for this image.\"],\n"," (\"rng-tags\", \"formal\", False, True): [\"Write a list of Booru tags for this image within {word_count} words.\"],\n"," (\"rng-tags\", \"formal\", True, False): [\"Write a {length} list of Booru tags for this image.\"],\n","}"],"metadata":{"id":"_qrUZ7jRIxIf"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":9,"metadata":{"id":"PjO3Wc4kzR08","executionInfo":{"status":"ok","timestamp":1753383334613,"user_tz":-120,"elapsed":3,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.35 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 0.75 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","#-----#\n","num=1\n","#import torch\n","#import Image\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, prompt_str: str) -> str:\n"," torch.cuda.empty_cache()\n"," length =512\n"," #length = 
None if caption_length == \"any\" else caption_length\n"," #if isinstance(length, str):\n"," # try:\n"," # length = int(length)\n"," # except ValueError:\n"," # pass\n"," #if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," # caption_tone = \"formal\"\n"," #prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," #if prompt_key not in CAPTION_TYPE_MAP:\n"," # raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," #prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","source":["\n","%cd /content/\n","!unzip 
training_data.zip\n","\n","\n","\n","\n","\n","\n","\n"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"c60a6jW-YwsN","executionInfo":{"status":"ok","timestamp":1753383288961,"user_tz":-120,"elapsed":119,"user":{"displayName":"","userId":""}},"outputId":"9cefa4ba-9fed-44a5-c09a-ba306481b192"},"execution_count":7,"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n","Archive: training_data.zip\n"," extracting: 000.txt \n"," extracting: 001.txt \n"," extracting: 002.txt \n"," extracting: 003.txt \n"," extracting: 004.txt \n"," extracting: 005.txt \n"," extracting: 006.txt \n"," extracting: 007.txt \n"," extracting: 008.txt \n"," extracting: 009.txt \n"," extracting: 010.txt \n"," extracting: 011.txt \n"," extracting: 012.txt \n"," extracting: 013.txt \n"," extracting: 014.txt \n"," extracting: 015.txt \n"," extracting: 016.txt \n"," extracting: 017.txt \n"," extracting: 018.txt \n"," extracting: 019.txt \n"," extracting: 020.txt \n"," extracting: 021.txt \n"," extracting: 022.txt \n"," extracting: 023.txt \n"," extracting: 024.txt \n"," extracting: 025.txt \n"," extracting: 026.txt \n"," extracting: 027.txt \n"," extracting: 028.txt \n"," extracting: 029.txt \n"," extracting: 030.txt \n"," extracting: 031.txt \n"," extracting: 032.txt \n"," extracting: 033.txt \n"," extracting: 034.txt \n"," extracting: 035.txt \n"," extracting: 036.txt \n"," extracting: 037.txt \n"," extracting: 000.jpg \n"," extracting: 001.jpg \n"," extracting: 002.jpg \n"," extracting: 003.jpg \n"," extracting: 004.jpg \n"," extracting: 005.jpg \n"," extracting: 006.jpg \n"," extracting: 007.jpg \n"," extracting: 008.jpg \n"," extracting: 009.jpg \n"," extracting: 010.jpg \n"," extracting: 011.jpg \n"," extracting: 012.jpg \n"," extracting: 013.jpg \n"," extracting: 014.jpg \n"," extracting: 015.jpg \n"," extracting: 016.jpg \n"," extracting: 017.jpg \n"," extracting: 018.jpg \n"," extracting: 019.jpg \n"," extracting: 020.jpg \n"," extracting: 021.jpg \n"," extracting: 022.jpg \n"," extracting: 023.jpg \n"," extracting: 024.jpg \n"," extracting: 025.jpg \n"," extracting: 026.jpg \n"," extracting: 027.jpg \n"," extracting: 028.jpg \n"," extracting: 029.jpg \n"," extracting: 030.jpg \n"," extracting: 031.jpg \n"," extracting: 032.jpg \n"," extracting: 033.jpg \n"," extracting: 034.jpg \n"," extracting: 035.jpg \n"," extracting: 036.jpg \n"," extracting: 037.jpg \n"]}]},{"cell_type":"code","execution_count":8,"metadata":{"id":"mhccTDyzirVn","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1753383292586,"user_tz":-120,"elapsed":429,"user":{"displayName":"","userId":""}},"outputId":"d89b460b-828f-47ff-fc06-8e01147ccd1f"},"outputs":[{"output_type":"stream","name":"stdout","text":["Splitting all images found under /content/... 
\n"," into 1 along x-axis\n","/content\n","021.jpg\n","/content\n","/content/split\n","/content\n","1girl, glasses, solo, brown hair, subtitled, smile, sidelocks, meme, portrait, closed mouth, indoors, bangs, looking at viewer, english text, black eyes\n","black eyes,bangs,subtitled,portrait,meme,sidelocks,closed mouth,english text,1girl,brown hair,smile,looking at viewer,indoors,solo,glasses\n","/content/split\n","015.jpg\n","/content\n","/content/split\n","/content\n","subtitled, 1boy, male focus, solo, fake screenshot, belt, retro artstyle, labcoat, english text, long sleeves, pants, brick wall, shirt, head out of frame, meme, anime coloring, 1990s (style)\n","belt,1boy,labcoat,subtitled,1990s (style),english text,meme,pants,brick wall,retro artstyle,male focus,anime coloring,fake screenshot,solo,long sleeves,shirt,head out of frame\n","/content/split\n","028.jpg\n","/content\n","/content/split\n","/content\n","1boy, male focus, bow, solo, bowtie, glasses, male child, red bow, blue eyes, red bowtie, english text, subtitled, brown hair, black-framed eyewear, looking at viewer, blue jacket, shirt, open mouth, jacket, white shirt, bangs, upper body, collared shirt, caution tape, parody, short hair, blazer, child, indoors\n","bowtie,shirt,subtitled,red bow,looking at viewer,blue eyes,blue jacket,1boy,red bowtie,open mouth,bow,male child,male focus,caution tape,jacket,glasses,black-framed eyewear,upper body,parody,indoors,collared shirt,white shirt,blazer,short hair,bangs,solo,english text,brown hair,child\n","/content/split\n","010.jpg\n","/content\n","/content/split\n","/content\n","male focus, english text, 1boy, cape, mask, superhero, subtitled, belt, solo, door, hands on hips, indoors, parody\n","mask,1boy,male focus,subtitled,cape,parody,indoors,belt,english text,solo,hands on hips,door,superhero\n","/content/split\n","019.jpg\n","/content\n","/content/split\n","/content\n","1girl, subtitled, retro artstyle, solo, smile, dark-skinned female, 1990s (style), pillow, dark skin, breasts, lying, english text, armpits, 1980s (style), on back, tank top, arm up, grin, hair between eyes, black hair, short hair, anime coloring, brown hair, blue eyes, medium breasts, upper body, on bed, covered nipples, bed, bangs\n","bed,solo,dark-skinned female,1990s (style),tank top,retro artstyle,blue eyes,arm up,english text,1girl,on back,brown hair,dark skin,breasts,covered nipples,anime coloring,1980s (style),short hair,on bed,armpits,smile,grin,pillow,medium breasts,black hair,hair between eyes,upper body,subtitled,lying,bangs\n","/content/split\n","011.jpg\n","/content\n","/content/split\n","/content\n","1girl, solo, retro artstyle, subtitled, blue eyes, black hair, tank top, 1990s (style), dark skin, short hair, 1980s (style), collarbone, dark-skinned female, hair between eyes, anime coloring, cracked wall, open mouth, indoors, bangs, bare shoulders, upper body, english text, wall\n","subtitled,dark skin,indoors,wall,tank top,upper body,black hair,collarbone,cracked wall,1980s (style),bangs,dark-skinned female,hair between eyes,solo,short hair,english text,bare shoulders,retro artstyle,anime coloring,blue eyes,1girl,open mouth,1990s (style)\n","/content/split\n","005.jpg\n","/content\n","/content/split\n","/content\n","mole under eye, multiple boys, mole, subtitled, 3boys, brown hair, black hair, male focus, closed eyes, jacket, english text, smile, open mouth, short hair, ponytail, upper body, parody, anime coloring, fake screenshot, track jacket, ahoge\n","open mouth,jacket,ahoge,fake 
screenshot,ponytail,upper body,anime coloring,male focus,mole under eye,3boys,mole,english text,black hair,brown hair,track jacket,closed eyes,multiple boys,subtitled,smile,parody,short hair\n","/content/split\n","036.jpg\n","/content\n","/content/split\n","/content\n","multiple girls, multiple boys, blonde hair, crossover, black hair, english text, red eyes, dress, subtitled, ribbon, short hair, jacket, glasses, 2girls, red dress, hair ribbon, two-tone dress, weapon, bow, gun, parody, shirt, hat, bowtie, multicolored hair, red hair, brown hair, headphones, red ribbon, bangs\n","brown hair,red ribbon,ribbon,short hair,red hair,crossover,multiple girls,headphones,hair ribbon,shirt,english text,bowtie,bangs,2girls,jacket,weapon,red dress,gun,black hair,red eyes,multicolored hair,dress,hat,multiple boys,two-tone dress,subtitled,glasses,parody,bow,blonde hair\n","/content/split\n","000.jpg\n","/content\n","/content/split\n","/content\n","car interior, multiple boys, subtitled, brown hair, green eyes, black hair, male focus, brown eyes, fake screenshot, english text, steering wheel, 2boys, car, parody, short hair, driving, 3boys, motor vehicle, sweatdrop, meme, ground vehicle\n","subtitled,meme,3boys,fake screenshot,ground vehicle,car interior,brown hair,motor vehicle,green eyes,multiple boys,steering wheel,male focus,car,driving,2boys,parody,black hair,brown eyes,sweatdrop,english text,short hair\n","/content/split\n","027.jpg\n","/content\n","/content/split\n","/content\n","subtitled, weapon, gun, fake screenshot, multiple boys, english text, holding, holding weapon, retro artstyle, handgun, parody, holding gun, solo focus, male focus\n","multiple boys,subtitled,holding,gun,holding weapon,male focus,english text,retro artstyle,solo focus,parody,handgun,fake screenshot,weapon,holding gun\n","/content/split\n","020.jpg\n","/content\n","/content/split\n","/content\n","weapon, multiple girls, gun, 2girls, subtitled, retro artstyle, blonde hair, smile, handgun, holding weapon, holding gun, shirt, short hair, english text, grin, green eyes, 1990s (style), holding, black hair, brown hair, dark-skinned female, 1980s (style), open mouth, white shirt, dark skin\n","shirt,2girls,handgun,subtitled,retro artstyle,1990s (style),holding,green eyes,weapon,brown hair,english text,multiple girls,black hair,grin,smile,open mouth,dark-skinned female,gun,dark skin,holding gun,short hair,holding weapon,white shirt,1980s (style),blonde hair\n","/content/split\n","023.jpg\n","/content\n","/content/split\n","/content\n","1girl, subtitled, glasses, smile, brown hair, solo, retro artstyle, red eyes, english text, grin, 1990s (style), glowing, coat, long hair\n","glowing,brown hair,glasses,english text,1990s (style),1girl,red eyes,subtitled,long hair,solo,retro artstyle,smile,grin,coat\n","/content/split\n","014.jpg\n","/content\n","/content/split\n","/content\n","subtitled, multiple boys, jacket, male focus, 3boys, parody, retro artstyle, sunglasses, fake screenshot, english text, pants, shirt, black hair, brown hair, scene reference, 2boys, 1980s (style), gun\n","black hair,2boys,english text,multiple boys,3boys,brown hair,gun,retro artstyle,sunglasses,1980s (style),pants,parody,shirt,scene reference,male focus,jacket,subtitled,fake screenshot\n","/content/split\n","033.jpg\n","/content\n","/content/split\n","/content\n","multiple boys, hat, english text, subtitled, fake screenshot, blue hair, fedora, parody, trench coat, 1girl, retro artstyle, pants, jacket, short hair, photo (object), black hair, multiple girls, 
from above\n","subtitled,parody,blue hair,black hair,photo (object),english text,1girl,fake screenshot,trench coat,multiple boys,jacket,pants,short hair,from above,fedora,retro artstyle,multiple girls,hat\n","/content/split\n","008.jpg\n","/content\n","/content/split\n","/content\n","phone, gloves, english text, holding, cellphone, black gloves, fingerless gloves, holding phone, long sleeves, subtitled, smartphone, 1girl, solo, 1boy\n","black gloves,1girl,1boy,phone,fingerless gloves,gloves,holding phone,english text,subtitled,smartphone,holding,long sleeves,cellphone,solo\n","/content/split\n","018.jpg\n","/content\n","/content/split\n","/content\n","weapon, 1boy, male focus, gun, subtitled, headband, smile, solo, english text, red hair, indoors, handgun, retro artstyle, parody, revolver, fake screenshot, pocket\n","weapon,subtitled,indoors,male focus,smile,pocket,english text,fake screenshot,red hair,handgun,headband,solo,retro artstyle,revolver,parody,1boy,gun\n","/content/split\n","035.jpg\n","/content\n","/content/split\n","/content\n","retro artstyle, 2girls, multiple girls, 1990s (style), 1980s (style), blonde hair, short hair, black hair, hugging own legs, english text, sitting, shirt, green eyes, dark skin, dark-skinned female, subtitled, barefoot, tank top, white shirt\n","tank top,barefoot,dark-skinned female,subtitled,hugging own legs,retro artstyle,multiple girls,black hair,blonde hair,2girls,shirt,sitting,1980s (style),green eyes,english text,short hair,white shirt,dark skin,1990s (style)\n","/content/split\n","037.jpg\n","/content\n","/content/split\n","/content\n","1girl, chopsticks, subtitled, brown hair, solo, holding chopsticks, english text, coat, holding, jacket, open mouth, brown eyes, turtleneck, plant, indoors, noodles, sidelocks, food, trench coat\n","subtitled,plant,solo,sidelocks,holding,coat,holding chopsticks,chopsticks,jacket,brown hair,indoors,trench coat,brown eyes,1girl,noodles,turtleneck,english text,food,open mouth\n","/content/split\n","012.jpg\n","/content\n","/content/split\n","/content\n","cloud, no humans, english text, sky, sunset, cityscape, scenery, city, skyline, cloudy sky, skyscraper, building\n","skyline,building,sky,cloud,cityscape,sunset,city,no humans,skyscraper,cloudy sky,scenery,english text\n","/content/split\n","009.jpg\n","/content\n","/content/split\n","/content\n","subtitled, weapon, gun, handgun, holding weapon, english text, holding, holding gun, tiles, fake screenshot, solo, 1boy, parody, male focus\n","holding gun,holding,handgun,tiles,subtitled,english text,male focus,solo,gun,parody,fake screenshot,1boy,weapon,holding weapon\n","/content/split\n","017.jpg\n","/content\n","/content/split\n","/content\n","1boy, weapon, male focus, gun, solo, black hair, shirt, handgun, subtitled, bags under eyes, english text, white shirt, revolver, fake screenshot, black eyes, upper body, retro artstyle, parody\n","fake screenshot,solo,black hair,weapon,upper body,bags under eyes,1boy,male focus,shirt,retro artstyle,revolver,handgun,white shirt,english text,parody,black eyes,gun,subtitled\n","/content/split\n","007.jpg\n","/content\n","/content/split\n","/content\n","subtitled, gun, weapon, 1boy, solo, jacket, male focus, brown jacket, english text, fake screenshot, holding, parody, pants, retro artstyle, handgun, holding weapon, holding gun, scene reference, head out of frame\n","head out of frame,male focus,holding gun,gun,weapon,parody,subtitled,fake screenshot,english text,jacket,retro artstyle,solo,scene reference,holding,holding 
weapon,1boy,handgun,pants,brown jacket\n","/content/split\n","031.jpg\n","/content\n","/content/split\n","/content\n","weapon, 1boy, gun, male focus, subtitled, retro artstyle, rifle, english text, box, shirt, solo, 1980s (style), brown hair, fake screenshot, parody, sleeves rolled up, smile, pants, collared shirt\n","shirt,rifle,weapon,box,english text,subtitled,gun,retro artstyle,1980s (style),solo,parody,smile,1boy,pants,sleeves rolled up,brown hair,fake screenshot,collared shirt,male focus\n","/content/split\n","004.jpg\n","/content\n","/content/split\n","/content\n","car interior, subtitled, brown hair, green eyes, male focus, 2boys, steering wheel, car, multiple boys, motor vehicle, english text, brown eyes, ground vehicle, driving, fake screenshot, 1boy, parody, shirt\n","driving,shirt,male focus,steering wheel,fake screenshot,english text,green eyes,parody,ground vehicle,car,car interior,multiple boys,brown eyes,brown hair,motor vehicle,2boys,1boy,subtitled\n","/content/split\n","016.jpg\n","/content\n","/content/split\n","/content\n","male focus, 1boy, solo, bow, glasses, closed eyes, bowtie, male child, red bow, english text, brown hair, parody, red bowtie, closed mouth, black-framed eyewear, caution tape, subtitled, jacket, upper body, blue jacket\n","bow,subtitled,jacket,bowtie,parody,male child,1boy,black-framed eyewear,male focus,solo,closed mouth,blue jacket,red bow,upper body,english text,red bowtie,glasses,brown hair,caution tape,closed eyes\n","/content/split\n","013.jpg\n","/content\n","/content/split\n","/content\n","weapon, gun, english text, subtitled, no humans, rifle, handgun, suppressor, magazine (weapon), fake screenshot\n","rifle,fake screenshot,gun,weapon,suppressor,no humans,subtitled,magazine (weapon),handgun,english text\n","/content/split\n","029.jpg\n","/content\n","/content/split\n","/content\n","1boy, weapon, male focus, gun, subtitled, solo, headband, indoors, red hair, holding, smile, english text, holding weapon, rifle, grey jacket, jacket, retro artstyle, holding gun, parody, assault rifle, upper body, short hair\n","gun,holding gun,upper body,english text,holding,assault rifle,1boy,solo,weapon,red hair,indoors,holding weapon,headband,rifle,short hair,male focus,smile,jacket,grey jacket,subtitled,retro artstyle,parody\n","/content/split\n","006.jpg\n","/content\n","/content/split\n","/content\n","multiple boys, car interior, subtitled, brown hair, male focus, black hair, 3boys, brown eyes, driving, english text, steering wheel, car, green eyes, motor vehicle, short hair, ground vehicle, shirt, grey hair, open mouth, fake screenshot, jacket\n","subtitled,steering wheel,open mouth,car interior,motor vehicle,grey hair,short hair,male focus,shirt,jacket,driving,3boys,car,ground vehicle,green eyes,multiple boys,brown hair,brown eyes,english text,fake screenshot,black hair\n","/content/split\n","001.jpg\n","/content\n","/content/split\n","/content\n","car interior, subtitled, brown hair, green eyes, 2boys, steering wheel, car, male focus, multiple boys, motor vehicle, english text, driving, ground vehicle, fake screenshot, brown eyes, 1boy, parody, sweatdrop, anime coloring, short hair\n","car,steering wheel,car interior,1boy,driving,fake screenshot,2boys,brown eyes,multiple boys,green eyes,male focus,ground vehicle,motor vehicle,brown hair,short hair,parody,english text,sweatdrop,subtitled,anime coloring\n","/content/split\n","003.jpg\n","/content\n","/content/split\n","/content\n","no humans, english text, window, building, outdoors, 
day\n","building,english text,outdoors,window,day,no humans\n","/content/split\n","024.jpg\n","/content\n","/content/split\n","/content\n","subtitled, multiple boys, 1boy, pants, male focus, english text, from above, outdoors, shirt, bush, red shirt\n","multiple boys,english text,from above,pants,red shirt,bush,male focus,shirt,subtitled,1boy,outdoors\n","/content/split\n","022.jpg\n","/content\n","/content/split\n","/content\n","subtitled, weapon, gun, fake screenshot, english text, handgun, retro artstyle, holding weapon, parody, holding gun, holding, multiple boys, headband\n","subtitled,gun,fake screenshot,headband,holding,handgun,parody,multiple boys,holding gun,english text,holding weapon,weapon,retro artstyle\n","/content/split\n","032.jpg\n","/content\n","/content/split\n","/content\n","retro artstyle, 2girls, multiple girls, blonde hair, dark skin, dark-skinned female, 1990s (style), english text, black hair, short hair, 1980s (style), subtitled, hugging own legs, green eyes, tank top, sitting, shirt, black eyes, breasts, cleavage, knees up, indoors, fake screenshot\n","breasts,shirt,subtitled,multiple girls,short hair,dark skin,sitting,2girls,knees up,cleavage,blonde hair,green eyes,1990s (style),dark-skinned female,english text,hugging own legs,indoors,retro artstyle,black hair,black eyes,1980s (style),fake screenshot,tank top\n","/content/split\n","030.jpg\n","/content\n","/content/split\n","/content\n","weapon, subtitled, gun, multiple boys, holding, retro artstyle, 3boys, jacket, parody, holding gun, brown hair, english text, pants, holding weapon, sunglasses, rifle, red hair, smile, 1980s (style), shirt, indoors, headband, scene reference, black hair, male focus, short hair, standing, 1990s (style), blonde hair, red jacket\n","scene reference,red jacket,holding,1990s (style),blonde hair,holding gun,weapon,jacket,subtitled,black hair,indoors,male focus,sunglasses,retro artstyle,gun,shirt,parody,1980s (style),red hair,headband,short hair,rifle,pants,brown hair,smile,holding weapon,multiple boys,english text,standing,3boys\n","/content/split\n","034.jpg\n","/content\n","/content/split\n","/content\n","city, building, no humans, english text, cityscape, skyscraper, subtitled, outdoors, retro artstyle, scenery\n","outdoors,scenery,no humans,english text,retro artstyle,cityscape,city,building,skyscraper,subtitled\n","/content/split\n","002.jpg\n","/content\n","/content/split\n","/content\n","mole under eye, mole, 1boy, male focus, brown hair, solo, subtitled, open mouth, school uniform, sweatdrop, green eyes, looking at viewer, yellow eyes, english text, heterochromia, ahoge, fake screenshot, upper body\n","fake screenshot,yellow eyes,upper body,mole under eye,solo,english text,green eyes,heterochromia,looking at viewer,brown hair,ahoge,male focus,1boy,open mouth,subtitled,sweatdrop,school uniform,mole\n","/content/split\n","025.jpg\n","/content\n","/content/split\n","/content\n","1girl, gun, weapon, blonde hair, solo, english text, dress, pleated dress, grey dress, subtitled, rifle, two-tone dress, red dress, indoors, submachine gun, handgun, short hair, meme, ribbon, squatting, bangs, school uniform, parody, cardboard box, box, assault rifle, shotgun, suppressor, desk, hair ribbon\n","squatting,subtitled,submachine gun,grey dress,shotgun,handgun,box,indoors,desk,parody,two-tone dress,suppressor,ribbon,1girl,blonde hair,gun,red dress,cardboard box,pleated dress,meme,bangs,assault rifle,rifle,short hair,dress,school uniform,english text,hair 
ribbon,solo,weapon\n","/content/split\n","026.jpg\n","/content\n","/content/split\n","/content\n","1boy, weapon, male focus, gun, black hair, solo, shirt, handgun, subtitled, white shirt, revolver, bags under eyes, english text, upper body, black eyes, retro artstyle, fake screenshot, parody\n","shirt,solo,subtitled,weapon,retro artstyle,gun,white shirt,black hair,english text,fake screenshot,upper body,parody,bags under eyes,handgun,1boy,male focus,revolver,black eyes\n","/content/split\n"]}],"source":["# @markdown Split each image found under /content/ into `no_parts` vertical slices prior to captioning\n","no_parts = 1 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found under /content/ into {no_parts} part(s) along the x-axis')\n","import os, math, random\n","from PIL import Image\n","\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE', '')\n","if using_Kaggle: home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n",
"def my_mkdirs(folder):\n","    if not os.path.exists(folder):\n","        os.makedirs(folder)\n","\n","split_folder = '/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","src_folder = '/content/'\n","suffixes = ['.gif', '.png', '.jpeg', '.webp', '.jpg']\n","num = 1  # running index used to name the split images and their prompt files\n","for filename in os.listdir(src_folder):\n","    for suffix in suffixes:\n","        if not filename.endswith(suffix): continue\n","        print(filename)\n","        %cd {src_folder}\n","        textpath = filename.replace(suffix, '.txt')\n","        image = Image.open(f\"{filename}\").convert('RGB')\n","        w, h = image.size\n","        divs = no_parts\n","        step = math.floor(w / divs)\n",
"        # Read the matching .txt tag file (if any) and build a shuffled danbooru-tag prompt\n","        prompt_str = ''\n","        if os.path.exists(textpath):\n","            with open(f'{textpath}', 'r') as file:\n","                _tags = file.read()\n","            print(_tags)\n","            if _tags:\n","                _tags = [item.strip() for item in f'{_tags}'.split(',')]\n","                random.shuffle(_tags)\n","                tags = ','.join(_tags)\n","                print(tags)\n","                prompt_str = f'Please describe the image in 400 words using danbooru tags : {tags}'\n",
"        # Crop and save every vertical slice together with its prompt file\n","        %cd {split_folder}\n","        for index in range(divs):\n","            box = (step * index, 0, step * (index + 1), math.floor(1.0 * h))\n","            image.crop(box).save(f'{num}_{index}.jpeg', 'JPEG')\n","            if prompt_str:\n","                f = open(f'{num}_{index}.txt', 'w')\n","                f.write(f'{prompt_str}')\n","                f.close()\n","        num = num + 1\n","        # Remove the processed source image and its tag file\n","        os.remove(f\"{src_folder}{filename}\")\n","        if os.path.exists(f'{src_folder}{textpath}'):\n","            os.remove(f'{src_folder}{textpath}')"]},
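{"cell_type":"code","execution_count":null,"metadata":{"id":"SplitPreview01"},"outputs":[],"source":["# @markdown Optional sanity check (a minimal sketch, not part of the captioning pipeline): preview what the split cell above wrote to /content/split/ before spending GPU time on captioning. The folder path and file layout are assumed from that cell.\n","import os\n","\n","split_folder = '/content/split/'  # assumed: same folder the split cell writes to\n","if os.path.exists(split_folder):\n","    files = sorted(os.listdir(split_folder))\n","    print(f'{len(files)} files in {split_folder}')\n","    print(files[:10])\n","    # Print the first generated prompt (if any) so the shuffled-tag prompt can be inspected\n","    txts = [f for f in files if f.endswith('.txt')]\n","    if txts:\n","        with open(os.path.join(split_folder, txts[0]), 'r') as file:\n","            print(file.read())\n","else:\n","    print(f'{split_folder} not found, run the split cell first')"]},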
{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo"},"outputs":[],"source":["# @markdown Caption every image in /content/split/ (or /content/ if no split folder exists) with JoyCaption and save numbered image/caption pairs to /content/tmp/\n","import os, random\n","from PIL import Image\n","\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE', '')\n","if using_Kaggle: home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n","    if not os.path.exists(folder):\n","        os.makedirs(folder)\n","\n","tgt_folder = '/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","split_folder = '/content/split/'\n","src_folder = '/content/'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif', '.png', '.jpeg', '.webp', '.jpg']\n","num = 1\n",
"for filename in os.listdir(src_folder):\n","    for suffix in suffixes:\n","        if not filename.endswith(suffix): continue\n","        # Find the next free output index so reruns do not overwrite earlier captions\n","        while os.path.exists(f'{tgt_folder}{num}.txt'): num = num + 1\n","        print(filename)\n","        %cd {src_folder}\n","        textpath = filename.replace(suffix, '.txt')\n","        # Default prompt; overridden by a matching .txt prompt file when one exists\n","        prompt_str = 'Describe the anime screencap and the yellow subtitles text in 400 words and write the danbooru tags'\n","        if os.path.exists(f'{src_folder}{textpath}'):\n","            with open(f'{textpath}', 'r') as file:\n","                prompt_str = file.read()\n","        input_image = Image.open(f\"{filename}\").convert('RGB')\n","        caption = stream_chat(input_image, f'{prompt_str}')\n","        caption = caption + prompt_str\n","        print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n","        print(caption)\n",
"        # Save the caption and a JPEG copy of the image under a shared index\n","        %cd {tgt_folder}\n","        f = open(f\"{num}.txt\", \"w\")\n","        f.write(f'{caption}')\n","        f.close()\n","        input_image.save(f'{num}.jpeg', \"JPEG\")\n","        os.remove(f\"{src_folder}{filename}\")\n","        if os.path.exists(f'{src_folder}{textpath}'):\n","            os.remove(f'{src_folder}{textpath}')\n","        num = num + 1"]},
{"cell_type":"code","execution_count":6,"metadata":{"id":"5EztLCjkPq4U","colab":{"base_uri":"https://localhost:8080/","height":54},"executionInfo":{"status":"ok","timestamp":1753383024100,"user_tz":-120,"elapsed":131,"user":{"displayName":"","userId":""}},"outputId":"d0ac4f71-1b7d-46a6-b511-68287cb6c5dc"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]},{"output_type":"execute_result","data":{"text/plain":["'/content/tmp.zip'"],"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"}},"metadata":{},"execution_count":6}],"source":["# @markdown Zip the captioned dataset in /content/tmp/ into /content/tmp.zip for download\n","import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"]},
{"cell_type":"code","source":["# @markdown Download the images from every URL listed in image_urls.txt into the workspace\n","\n","!wget -i image_urls.txt -P ./splits\n","\n"],"metadata":{"id":"v9UMCh3h_mNj"},"execution_count":null,"outputs":[]},
{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["# @markdown Disconnect from the Google Colab runtime when finished (uncomment runtime.unassign() below to enable)\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from 
runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753383684211},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753179095950},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753120703402},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}