codeShare committed
Commit cef1fa3 · verified · 1 parent: 3b596e7

Upload Joycaption_Alpha_One.ipynb

Files changed (1)
  1. Joycaption_Alpha_One.ipynb +1 -1
Joycaption_Alpha_One.ipynb CHANGED
@@ -1 +1 @@
- {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' #{type:'string'}\n","enable_custom_prompt = False # {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = 
nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","execution_count":9,"metadata":{"id":"PjO3Wc4kzR08","executionInfo":{"status":"ok","timestamp":1753383334613,"user_tz":-120,"elapsed":3,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.35 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 
0.75 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","#-----#\n","num=1\n","#import torch\n","#import Image\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, prompt_str: str) -> str:\n"," torch.cuda.empty_cache()\n"," length =512\n"," #length = None if caption_length == \"any\" else caption_length\n"," #if isinstance(length, str):\n"," # try:\n"," # length = int(length)\n"," # except ValueError:\n"," # pass\n"," #if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," # caption_tone = \"formal\"\n"," #prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," #if prompt_key not in CAPTION_TYPE_MAP:\n"," # raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," #prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","source":["\n","%cd /content/\n","!unzip 
training_data.zip\n","\n","\n","\n","\n","\n","\n","\n"],"metadata":{"id":"c60a6jW-YwsN"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"mhccTDyzirVn"},"outputs":[],"source":["# @markdown Split the image into 20 parts prior to running\n","no_parts = 1 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found under /content/... \\n into {no_parts} along x-axis')\n","import os,math,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","split_folder = f'/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","#num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," #while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," #os.remove(f'{filename}')\n"," #continue\n"," image = Image.open(f\"{filename}\").convert('RGB')\n"," w,h=image.size\n"," #grid = product(range(0, h-h%d, d), range(0, w-w%d, d))\n"," divs=no_parts\n"," step=math.floor(w/divs)\n"," %cd {split_folder}\n"," for index in range(divs):\n"," box = (step*index, 0 ,step*(index+1),math.floor(1.0*h))\n"," image.crop(box).save(f'{num}_{index}.jpeg','JPEG')\n"," %cd /content/\n"," if os.path.exists(textpath):\n"," with open(f'{textpath}', 'r') as file:\n"," _tags = file.read()\n","\n"," print(_tags)\n"," if not _tags:continue\n"," tags=''\n"," _tags = [item.strip() for item in f'{_tags}'.split(',')]\n"," random.shuffle(_tags)\n"," for tag in _tags:\n"," tags = tags + tag + ','\n"," #----#\n"," tags = (tags + 'AAAA').replace(',AAAA','')\n"," print(tags)\n"," prompt_str = f' {tags}'\n"," %cd {split_folder}\n"," f = open(f'{num}_{index}.txt','w')\n"," f.write(f'{prompt_str}')\n"," f.close()\n"," #---#\n"," #-----#\n"," #----#\n"," num = num+1\n"," #caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," #print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," #print(caption)\n"," #---------#\n"," #f = open(f\"{num}.txt\", \"w\")\n"," #f.write(f'{caption}')\n"," #f.close()\n"," #input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo","colab":{"base_uri":"https://localhost:8080/"},"outputId":"7b08f5d9-835f-47db-9951-21a04ab2cd98"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n","73_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text scenery,building,sunset,skyline,skyscraper,english text,no humans,cloudy sky,cityscape,city,sky,cloud\n","...\n","\n","...caption for 73_0.jpeg\n","\n","...\n","A high-rise skyline, silhouetted against a dramatic sunset sky with deep oranges and reds. The sky is filled with streaky clouds. Text across the bottom, in large yellow font, reads \"YEAH AAAAAAHHHHHH!!!\". 
scenery,building,sunset,skyline,skyscraper,english text,no humans,cloudy sky,cityscape,city,sky,cloud\n","/content/tmp\n","59_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text dark skin,brown hair,on bed,arm up,bangs,pillow,black hair,short hair,anime coloring,smile,english text,1girl,dark-skinned female,blue eyes,retro artstyle,breasts,covered nipples,hair between eyes,solo,on back,subtitled,lying,1990s (style),grin,bed,tank top,upper body,medium breasts,1980s (style),armpits\n","...\n","\n","...caption for 59_0.jpeg\n","\n","...\n","Bed scene dark skin,brown hair,on bed,arm up,bangs,pillow,black hair,short hair,anime coloring,smile,english text,1girl,dark-skinned female,blue eyes,retro artstyle,breasts,covered nipples,hair between eyes,solo,on back,subtitled,lying,1990s (style),grin,bed,tank top,upper body,medium breasts,1980s (style),armpits\n","/content/tmp\n","68_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text short hair,fake screenshot,1girl,multiple girls,multiple boys,black hair,trench coat,subtitled,pants,fedora,photo (object),retro artstyle,english text,parody,from above,hat,blue hair,jacket\n","...\n","\n","...caption for 68_0.jpeg\n","\n","...\n","The background includes a painting hanging on a beige wall to the right, and a TV stand with a TV on top and a plant. There are five individuals. One person, wearing a white trench coat and light green pants, has an orange face covering. Another individual wearing a black suit and cap walks next to the orange covered individual. short hair,fake screenshot,1girl,multiple girls,multiple boys,black hair,trench coat,subtitled,pants,fedora,photo (object),retro artstyle,english text,parody,from above,hat,blue hair,jacket\n","/content/tmp\n","79_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text closed mouth,upper body,1boy,caution tape,parody,bowtie,red bowtie,male focus,bow,closed eyes,subtitled,jacket,brown hair,black-framed eyewear,solo,blue jacket,red bow,glasses,male child,english text\n","...\n","\n","...caption for 79_0.jpeg\n","\n","...\n","animated still, sfw, parody closed mouth,upper body,1boy,caution tape,parody,bowtie,red bowtie,male focus,bow,closed eyes,subtitled,jacket,brown hair,black-framed eyewear,solo,blue jacket,red bow,glasses,male child,english text\n","/content/tmp\n","92_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text white shirt,solo,revolver,shirt,upper body,parody,english text,bags under eyes,black hair,male focus,retro artstyle,handgun,1boy,weapon,subtitled,black eyes,fake screenshot,gun\n","...\n","\n","...caption for 92_0.jpeg\n","\n","...\n","text white shirt,solo,revolver,shirt,upper body,parody,english text,bags under eyes,black hair,male focus,retro artstyle,handgun,1boy,weapon,subtitled,black eyes,fake screenshot,gun\n","/content/tmp\n","89_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text scenery,building,cityscape,outdoors,retro artstyle,subtitled,skyscraper,no humans,city,english text\n","...\n","\n","...caption for 89_0.jpeg\n","\n","...\n","A scene from an anime showing a night-time view of a city. The scene is set in a digital, stylized manner, giving a sense of a computer-generated city. Two high-rise buildings, each with rectangular shapes and windows in neat rows, dominate the foreground. 
Each building has a dark texture with blue and gray tones. In the background, more buildings with illuminated windows stand out against the night sky, creating a sense of depth. The scene is dimly lit, with a mixture of red and purple hues. A large subtitle in yellow letters at the bottom reads, \"YEAHHHHHH!!! AAAAH!!!\", indicating excitement or joy, possibly directed at the cityscape or the city's lively atmosphere. scenery,building,cityscape,outdoors,retro artstyle,subtitled,skyscraper,no humans,city,english text\n","/content/tmp\n","91_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text dress,handgun,desk,1girl,suppressor,solo,submachine gun,shotgun,rifle,school uniform,ribbon,bangs,meme,red dress,subtitled,blonde hair,weapon,two-tone dress,cardboard box,hair ribbon,grey dress,pleated dress,parody,english text,squatting,gun,short hair,indoors,box,assault rifle\n","...\n","\n","...caption for 91_0.jpeg\n","\n","...\n","This meme features a scene from an anime, depicting a blonde girl with a ponytail standing against a wall adorned with various handguns, shotguns, and rifles. She is wearing a red dress, which accentuates her youthful appearance. The backdrop includes shelves and a desk, adding a functional, everyday setting to the chaotic arrangement. Her pose is suggestive, as if about to take aim at a target. The text overlaid at the bottom in yellow color indicates she is in a dilemma about whether to choose one of the guns, given their diversity. The image also contains a humorous caption, “very interesting choice of diet you got going on in here,” hinting at a surreal, absurd interaction. dress,handgun,desk,1girl,suppressor,solo,submachine gun,shotgun,rifle,school uniform,ribbon,bangs,meme,red dress,subtitled,blonde hair,weapon,two-tone dress,cardboard box,hair ribbon,grey dress,pleated dress,parody,english text,squatting,gun,short hair,indoors,box,assault rifle\n","/content/tmp\n","87_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text cleavage,sitting,1990s (style),black eyes,retro artstyle,2girls,hugging own legs,fake screenshot,shirt,subtitled,black hair,english text,knees up,dark skin,multiple girls,1980s (style),indoors,green eyes,tank top,breasts,blonde hair,short hair,dark-skinned female\n","...\n","\n","...caption for 87_0.jpeg\n","\n","...\n","Looks like a still from an anime show with hand-drawn characters. Two young women sit on the floor. They're wearing casual shirts. The one on the left is blonde with shoulder-length hair and light skin, looking a bit pouty, while the one on the right is dark-skinned, with shoulder-length dark hair and a more neutral face, arms around her legs. A speech bubble has yellow text, saying, \"Is it possible we could discuss potential rebirth for—\" in a retro style. 
cleavage,sitting,1990s (style),black eyes,retro artstyle,2girls,hugging own legs,fake screenshot,shirt,subtitled,black hair,english text,knees up,dark skin,multiple girls,1980s (style),indoors,green eyes,tank top,breasts,blonde hair,short hair,dark-skinned female\n","/content/tmp\n","66_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text 1girl,subtitled,1990s (style),brown hair,glasses,glowing,retro artstyle,coat,solo,long hair,english text,smile,red eyes,grin\n","...\n","\n","...caption for 66_0.jpeg\n","\n","...\n","In this anime screencap, the focus is on a close-up of a young girl with light skin and brown, wavy hair styled in a bob. She is seen from the side, slightly turned to the left, smiling widely, revealing her teeth, and showing a mischievous expression. Her large brown-framed glasses rest near the bridge of her nose, and there are two small dots near the right temple of her glasses, glowing in a bright pink color, which draws attention to her face. She wears a light-colored coat or jacket over her shoulders, with visible black straps underneath, adding to her playful demeanor. The background is mostly out of focus, with muted shades of brown, giving a sense of depth to the scene. At the bottom of the image, there are Japanese text in yellow with the text \"YEAHHHAAAAAAAHHHHHH!!!\" indicating her vocalizing. The overall artwork style features clean lines, with a playful and whimsical tone. 1girl,subtitled,1990s (style),brown hair,glasses,glowing,retro artstyle,coat,solo,long hair,english text,smile,red eyes,grin\n","/content/tmp\n","64_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text subtitled,parody,holding,fake screenshot,multiple boys,solo focus,retro artstyle,holding weapon,holding gun,weapon,gun,male focus,handgun,english text\n","...\n","\n","...caption for 64_0.jpeg\n","\n","...\n","English text subtitled,parody,holding,fake screenshot,multiple boys,solo focus,retro artstyle,holding weapon,holding gun,weapon,gun,male focus,handgun,english text\n","/content/tmp\n","62_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text hat,shirt,headphones,bowtie,red ribbon,short hair,2girls,parody,bow,ribbon,multicolored hair,english text,gun,blonde hair,brown hair,weapon,red dress,crossover,hair ribbon,bangs,two-tone dress,multiple girls,jacket,subtitled,dress,multiple boys,red eyes,red hair,black hair,glasses\n","...\n","\n","...caption for 62_0.jpeg\n","\n","...\n","Batman, blue eyes, brown hair, grey background, multiple characters, black eyes, red hair, blue skin, green eyes, green hair, grey shirt, red eyes, yellow skin, blue shirt, blonde hair, red shirt, grey hair, red tie, brown eyes, black eyes, yellow hair, black gloves, black shirt, grey eyes, black hair hat,shirt,headphones,bowtie,red ribbon,short hair,2girls,parody,bow,ribbon,multicolored hair,english text,gun,blonde hair,brown hair,weapon,red dress,crossover,hair ribbon,bangs,two-tone dress,multiple girls,jacket,subtitled,dress,multiple boys,red eyes,red hair,black hair,glasses\n","/content/tmp\n","76_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text english text,subtitled,brown jacket,solo,fake screenshot,gun,male focus,retro artstyle,jacket,holding gun,holding,holding weapon,weapon,parody,head out of frame,handgun,pants,scene reference,1boy\n","...\n","\n","...caption for 76_0.jpeg\n","\n","...\n","The drawing 
style is reminiscent of classic anime, with clean, sharp lines and a limited color palette dominated by dark shades. The image focuses on a male character, partially obscured by his brown leather jacket, which has black stripes on the arms. He wears a black shirt underneath. The scene is set at night, as suggested by the dim, dark-colored walls and a visible window. In the foreground, he grips a large, black, metal device that resembles a gun or a military-grade machine gun. Bullets are scattered around, near his feet and on the ground to his left, suggesting that he recently used the weapon.\n","\n","The text at the bottom, in a bold, bright yellow, states, \"We live in a big Middle Eastern bazaar,\" adding a humorous or ironic undertone to the scene. The character's physique is muscular, indicated by the tight fit of his jacket and the way he holds the weapon with both hands. The background is simplistic, with minimal detail, to focus attention on english text,subtitled,brown jacket,solo,fake screenshot,gun,male focus,retro artstyle,jacket,holding gun,holding,holding weapon,weapon,parody,head out of frame,handgun,pants,scene reference,1boy\n","/content/tmp\n","61_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text ahoge,smile,ponytail,mole,3boys,upper body,open mouth,anime coloring,multiple boys,fake screenshot,short hair,english text,subtitled,black hair,parody,jacket,brown hair,closed eyes,mole under eye,track jacket,male focus\n","...\n","\n","...caption for 61_0.jpeg\n","\n","...\n","looking at each other ahoge,smile,ponytail,mole,3boys,upper body,open mouth,anime coloring,multiple boys,fake screenshot,short hair,english text,subtitled,black hair,parody,jacket,brown hair,closed eyes,mole under eye,track jacket,male focus\n","/content/tmp\n","72_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text turtleneck,sidelocks,jacket,food,plant,holding,brown hair,1girl,coat,indoors,solo,holding chopsticks,noodles,chopsticks,open mouth,brown eyes,trench coat,english text,subtitled\n","...\n","\n","...caption for 72_0.jpeg\n","\n","...\n","Anime screencap. Yellow subtitles text: “We don’t do reburburments for private property.” A brown-haired woman in an open coat with turtleneck, leaning forward eating at a table with a large container and three small boxes, with plants on a ledge behind. Dim light. 
turtleneck,sidelocks,jacket,food,plant,holding,brown hair,1girl,coat,indoors,solo,holding chopsticks,noodles,chopsticks,open mouth,brown eyes,trench coat,english text,subtitled\n","/content/tmp\n","82_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text car interior,driving,3boys,green eyes,multiple boys,brown hair,grey hair,black hair,steering wheel,open mouth,ground vehicle,car,jacket,fake screenshot,short hair,motor vehicle,subtitled,brown eyes,male focus,english text,shirt\n","...\n","\n","...caption for 82_0.jpeg\n","\n","...\n","Boruto (anime) car interior,driving,3boys,green eyes,multiple boys,brown hair,grey hair,black hair,steering wheel,open mouth,ground vehicle,car,jacket,fake screenshot,short hair,motor vehicle,subtitled,brown eyes,male focus,english text,shirt\n","/content/tmp\n","86_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text holding weapon,weapon,fake screenshot,retro artstyle,multiple boys,headband,gun,holding,handgun,english text,subtitled,parody,holding gun\n","...\n","\n","...caption for 86_0.jpeg\n","\n","...\n","Hand reaching toward bright light of gun, 4 boys silhouetted behind hand, subtitle: \"I just got out of jail and I ain't going back\" holding weapon,weapon,fake screenshot,retro artstyle,multiple boys,headband,gun,holding,handgun,english text,subtitled,parody,holding gun\n","/content/tmp\n","63_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text fake screenshot,motor vehicle,car,brown eyes,english text,male focus,steering wheel,meme,multiple boys,car interior,driving,short hair,2boys,subtitled,3boys,sweatdrop,brown hair,green eyes,black hair,parody,ground vehicle\n","...\n","\n","...caption for 63_0.jpeg\n","\n","...\n","The image is a digital photograph with a video game-inspired anime art style. Three young males sit inside a car, facing the audience. The man on the right is wearing a grey headband, grey and black jacket, and dark blue shirt, looking visibly agitated, with his cheeks flushed and hands shaking. His right eye is narrowed and his mouth appears half-open, suggesting impatience. The second person from the right wears a grey jacket over a white shirt and looks slightly annoyed. The boy on the left is not fully visible but appears to be wearing an orange shirt. Outside the car, a blurred view of a grassy area and green trees suggests the countryside. Dialogue reads, \"I-hop we're going to Hop. Oh, don't start.\" fake screenshot,motor vehicle,car,brown eyes,english text,male focus,steering wheel,meme,multiple boys,car interior,driving,short hair,2boys,subtitled,3boys,sweatdrop,brown hair,green eyes,black hair,parody,ground vehicle\n","/content/tmp\n","75_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text black hair,upper body,bags under eyes,gun,english text,weapon,parody,retro artstyle,solo,male focus,fake screenshot,1boy,shirt,handgun,revolver,black eyes,subtitled,white shirt\n","...\n","\n","...caption for 75_0.jpeg\n","\n","...\n","The image features a scene from the anime \"Baccano!,\" characterized by its stylized and retro art style. The subject is a young man with pale skin and messy black hair, positioned slightly off-center to the left. He wears a simple white shirt and appears shocked or alarmed. 
The background is dominated by various guns of different types scattered around, with some guns lying on the ground or in mid-air, suggesting a chaotic scene. Surrounding the guns are some blood splatter marks, indicating violence. The colors in the image are dark and muted, with dominant hues of black, grey, and brown. The scene is highly dynamic, with the subject looking directly at the camera, conveying a sense of panic or fear. Below the image is a yellow-tinted text box in English with bold, cursive writing that reads, \"I was hoping you'd find that out before I got here.\" The tone of the text suggests a critical or ominous situation. black hair,upper body,bags under eyes,gun,english text,weapon,parody,retro artstyle,solo,male focus,fake screenshot,1boy,shirt,handgun,revolver,black eyes,subtitled,white shirt\n","/content/tmp\n","81_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text upper body,english text,parody,rifle,weapon,holding,smile,male focus,subtitled,headband,1boy,solo,short hair,holding gun,holding weapon,retro artstyle,gun,jacket,grey jacket,indoors,assault rifle,red hair\n","...\n","\n","...caption for 81_0.jpeg\n","\n","...\n","Parody, Anime, 90s artstyle, rifle, holding, smile, motherfucker, subtitles, English, male focus, solo, short hair, holding gun, holding weapon, jacket, grey jacket, indoors, assault rifle, red hair upper body,english text,parody,rifle,weapon,holding,smile,male focus,subtitled,headband,1boy,solo,short hair,holding gun,holding weapon,retro artstyle,gun,jacket,grey jacket,indoors,assault rifle,red hair\n","/content/tmp\n","57_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text male focus,bangs,child,upper body,shirt,male child,white shirt,caution tape,bowtie,1boy,jacket,collared shirt,indoors,short hair,black-framed eyewear,english text,brown hair,red bow,solo,red bowtie,subtitled,glasses,blazer,looking at viewer,bow,blue jacket,blue eyes,parody,open mouth\n","...\n","\n","...caption for 57_0.jpeg\n","\n","...\n","In \"Futurama\" character likeness, a Caucasian lad in his teens has medium-length brown hair and dark-framed specs. He sports a blazer with white shirt and red bow tie. His skin's tan, blue eyes squinting in worry as he stands in front of a gloomy alley, graffiti-covered brick wall behind. Yellow caution tape loops through the scene. Yellow text reads, \"and a Louisiana man be doing Robbing your house at 2:00 in the morning?\" referencing \"The Jeffersons\" theme song. 
male focus,bangs,child,upper body,shirt,male child,white shirt,caution tape,bowtie,1boy,jacket,collared shirt,indoors,short hair,black-framed eyewear,english text,brown hair,red bow,solo,red bowtie,subtitled,glasses,blazer,looking at viewer,bow,blue jacket,blue eyes,parody,open mouth\n","/content/tmp\n","58_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text male focus,cape,parody,indoors,hands on hips,subtitled,door,1boy,mask,superhero,belt,solo,english text\n","...\n","\n","...caption for 58_0.jpeg\n","\n","...\n","animated series, blue, black, shirt, grey, yellow, superman, superman shirt, Batman, yellow cape, blue cape, yellow belt, dark skin, indoor, door, grey background, grey shirt, yellow font, male male focus,cape,parody,indoors,hands on hips,subtitled,door,1boy,mask,superhero,belt,solo,english text\n","/content/tmp\n","74_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text holding gun,tiles,handgun,weapon,1boy,holding,holding weapon,male focus,gun,subtitled,fake screenshot,english text,parody,solo\n","...\n","\n","...caption for 74_0.jpeg\n","\n","...\n","This photo, set in a gloomy, minimalist setting with a dark tiled floor, depicts a sinister situation. The figure holds a handgun with the silencer in place, pointing downward, giving a threatening and ominous impression. A shadow, suggesting another person standing, looms behind the figure's lower legs. A yellow, hand-drawn text at the bottom reads: \"Dude, they got nice tottles in here too...\" in bold, uppercase letters, with the 'D' in 'Dude' drawn to be the same width as the 'D' in 'totels', enhancing the comic book aesthetic of the image. This scene conveys tension and humor, with the dark background and yellow text contrast making the message pop. 
holding gun,tiles,handgun,weapon,1boy,holding,holding weapon,male focus,gun,subtitled,fake screenshot,english text,parody,solo\n","/content/tmp\n","67_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text black hair,1980s (style),2boys,jacket,subtitled,multiple boys,brown hair,male focus,parody,shirt,3boys,pants,scene reference,fake screenshot,sunglasses,english text,gun,retro artstyle\n"]}],"source":["\n","import os,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","split_folder = '/content/split/'\n","src_folder = '/content/'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," prompt_str = 'Describe the colors in the anime screencap and the subtitles text '\n"," if os.path.exists(f'{src_folder}{textpath}'):\n"," with open(f'{textpath}', 'r') as file:\n"," tags = file.read()\n"," #prompt_str = f'Please improve this prompt : {tags}'\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, f'{prompt_str} {tags}')\n"," caption = caption + tags\n"," #caption = caption + \", and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\"\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')\n"," num = num+1"]},{"cell_type":"code","execution_count":6,"metadata":{"id":"5EztLCjkPq4U","colab":{"base_uri":"https://localhost:8080/","height":54},"executionInfo":{"status":"ok","timestamp":1753383024100,"user_tz":-120,"elapsed":131,"user":{"displayName":"","userId":""}},"outputId":"d0ac4f71-1b7d-46a6-b511-68287cb6c5dc"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]},{"output_type":"execute_result","data":{"text/plain":["'/content/tmp.zip'"],"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"}},"metadata":{},"execution_count":6}],"source":["import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"]},{"cell_type":"code","source":["# @markdown Save images of all urls found in image_urls.txt to workspace\n","\n","!wget -i image_urls.txt -P ./splits\n","\n"],"metadata":{"id":"v9UMCh3h_mNj"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["# @markdown Auto-disconnect from Google Colab upon running this cell\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from 
runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753384460583},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753179095950},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753120703402},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
 
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' #{type:'string'}\n","enable_custom_prompt = False # {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = 
nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","execution_count":20,"metadata":{"id":"PjO3Wc4kzR08","executionInfo":{"status":"ok","timestamp":1753385832611,"user_tz":-120,"elapsed":7,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.35 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 
0.75 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","#-----#\n","num=1\n","#import torch\n","#import Image\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, prompt_str: str) -> str:\n"," torch.cuda.empty_cache()\n"," length =512\n"," #length = None if caption_length == \"any\" else caption_length\n"," #if isinstance(length, str):\n"," # try:\n"," # length = int(length)\n"," # except ValueError:\n"," # pass\n"," #if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," # caption_tone = \"formal\"\n"," #prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," #if prompt_key not in CAPTION_TYPE_MAP:\n"," # raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," #prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","source":["\n","%cd /content/\n","!unzip 
training_data.zip\n","\n","\n","\n","\n","\n","\n","\n"],"metadata":{"id":"c60a6jW-YwsN","executionInfo":{"status":"ok","timestamp":1753385771906,"user_tz":-120,"elapsed":113,"user":{"displayName":"","userId":""}},"outputId":"5b20e305-bf5d-49be-8d7b-66f0a747b092","colab":{"base_uri":"https://localhost:8080/"}},"execution_count":18,"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n","Archive: training_data.zip\n"," extracting: 000.txt \n"," extracting: 001.txt \n"," extracting: 002.txt \n"," extracting: 003.txt \n"," extracting: 004.txt \n"," extracting: 005.txt \n"," extracting: 006.txt \n"," extracting: 007.txt \n"," extracting: 008.txt \n"," extracting: 009.txt \n"," extracting: 010.txt \n"," extracting: 011.txt \n"," extracting: 012.txt \n"," extracting: 013.txt \n"," extracting: 014.txt \n"," extracting: 015.txt \n"," extracting: 016.txt \n"," extracting: 017.txt \n"," extracting: 018.txt \n"," extracting: 019.txt \n"," extracting: 020.txt \n"," extracting: 021.txt \n"," extracting: 022.txt \n"," extracting: 023.txt \n"," extracting: 024.txt \n"," extracting: 025.txt \n"," extracting: 026.txt \n"," extracting: 027.txt \n"," extracting: 028.txt \n"," extracting: 029.txt \n"," extracting: 030.txt \n"," extracting: 000.jpg \n"," extracting: 001.jpg \n"," extracting: 002.jpg \n"," extracting: 003.jpg \n"," extracting: 004.jpg \n"," extracting: 005.jpg \n"," extracting: 006.jpg \n"," extracting: 007.jpg \n"," extracting: 008.jpg \n"," extracting: 009.jpg \n"," extracting: 010.jpg \n"," extracting: 011.jpg \n"," extracting: 012.jpg \n"," extracting: 013.jpg \n"," extracting: 014.jpg \n"," extracting: 015.jpg \n"," extracting: 016.jpg \n"," extracting: 017.jpg \n"," extracting: 018.jpg \n"," extracting: 019.jpg \n"," extracting: 020.jpg \n"," extracting: 021.jpg \n"," extracting: 022.jpg \n"," extracting: 023.jpg \n"," extracting: 024.jpg \n"," extracting: 025.jpg \n"," extracting: 026.jpg \n"," extracting: 027.jpg \n"," extracting: 028.jpg \n"," extracting: 029.jpg \n"," extracting: 030.jpg \n"]}]},{"cell_type":"code","execution_count":21,"metadata":{"id":"mhccTDyzirVn","executionInfo":{"status":"ok","timestamp":1753385856689,"user_tz":-120,"elapsed":28,"user":{"displayName":"","userId":""}},"outputId":"b3b3bdb6-8d6d-4210-b70b-09c0554e4519","colab":{"base_uri":"https://localhost:8080/"}},"outputs":[{"output_type":"stream","name":"stdout","text":["Splitting all images found under /content/... \n"," into 1 along x-axis\n","/content\n"]}],"source":["# @markdown Split the image into 20 parts prior to running\n","no_parts = 1 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found under /content/... 
\\n into {no_parts} along x-axis')\n","import os,math,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","split_folder = f'/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","#num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," #while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," #os.remove(f'{filename}')\n"," #continue\n"," image = Image.open(f\"{filename}\").convert('RGB')\n"," w,h=image.size\n"," #grid = product(range(0, h-h%d, d), range(0, w-w%d, d))\n"," divs=no_parts\n"," step=math.floor(w/divs)\n"," %cd {split_folder}\n"," for index in range(divs):\n"," box = (step*index, 0 ,step*(index+1),math.floor(1.0*h))\n"," image.crop(box).save(f'{num}_{index}.jpeg','JPEG')\n"," %cd /content/\n"," if os.path.exists(textpath):\n"," with open(f'{textpath}', 'r') as file:\n"," _tags = file.read()\n","\n"," print(_tags)\n"," if not _tags:continue\n"," tags=''\n"," _tags = [item.strip() for item in f'{_tags}'.split(',')]\n"," random.shuffle(_tags)\n"," for tag in _tags:\n"," tags = tags + tag + ' , '\n"," #----#\n"," tags = (tags + 'AAAA').replace(' , AAAA','')\n"," prompt_str = f' {tags}'\n"," %cd {split_folder}\n"," f = open(f'{num}_{index}.txt','w')\n"," f.write(f'{prompt_str}')\n"," f.close()\n"," #---#\n"," #-----#\n"," #----#\n"," num = num+1\n"," #caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," #print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," #print(caption)\n"," #---------#\n"," #f = open(f\"{num}.txt\", \"w\")\n"," #f.write(f'{caption}')\n"," #f.close()\n"," #input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo","colab":{"base_uri":"https://localhost:8080/"},"outputId":"5f92aa26-3f5f-47b3-df45-4059922dbebb"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n","20_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text tiles , male focus , black hair , solo focus , long sleeves , hands on own head , parody , black pants , kneeling , multiple boys , vest , meme , subtitled , white shirt , black vest , english text , fake screenshot , tile floor , pants , shirt\n","...\n","\n","...caption for 20_0.jpeg\n","\n","...\n","A shot from an anime of two men, one in the background who is seated, the other man is kneeling with his back to the camera and head in his hands. The men are wearing white shirts and dark vests. They have dark, short hair. In the center of the image is \"You're so fucking nuts!\" written in English with an arrow pointing to the kneeling man. 
tiles , male focus , black hair , solo focus , long sleeves , hands on own head , parody , black pants , kneeling , multiple boys , vest , meme , subtitled , white shirt , black vest , english text , fake screenshot , tile floor , pants , shirt\n","/content/tmp\n","22_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text fur trim , indoors , black hair , 1boy , male focus , holding , earrings , solo , short hair , open book , dark , sitting , jewelry , book , candle , reading , letterboxed\n"]}],"source":["\n","import os,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","split_folder = '/content/split/'\n","src_folder = '/content/'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," prompt_str = 'Describe the colors in the anime screencap and the subtitles text '\n"," if os.path.exists(f'{src_folder}{textpath}'):\n"," with open(f'{textpath}', 'r') as file:\n"," tags = file.read()\n"," #prompt_str = f'Please improve this prompt : {tags}'\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, f'{prompt_str} {tags}')\n"," caption = caption + tags\n"," #caption = caption + \", and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\"\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')\n"," num = num+1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"5EztLCjkPq4U","colab":{"base_uri":"https://localhost:8080/","height":54},"executionInfo":{"status":"ok","timestamp":1753383024100,"user_tz":-120,"elapsed":131,"user":{"displayName":"","userId":""}},"outputId":"d0ac4f71-1b7d-46a6-b511-68287cb6c5dc"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]},{"output_type":"execute_result","data":{"text/plain":["'/content/tmp.zip'"],"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"}},"metadata":{},"execution_count":6}],"source":["import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"]},{"cell_type":"code","source":["# @markdown Save images of all urls found in image_urls.txt to workspace\n","\n","!wget -i image_urls.txt -P ./splits\n","\n"],"metadata":{"id":"v9UMCh3h_mNj"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["# @markdown Auto-disconnect from Google Colab upon running this cell\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from 
runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753385887795},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753384460583},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753179095950},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753120703402},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}