codeShare committed
Commit 707cd50 · verified · Parent: 2c23e8e

Upload 5 files

Joycaption_Alpha_One.ipynb ADDED
@@ -0,0 +1 @@
 
 
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' #{type:'string'}\n","enable_custom_prompt = False # {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = 
nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","execution_count":2,"metadata":{"id":"PjO3Wc4kzR08","executionInfo":{"status":"ok","timestamp":1754463725243,"user_tz":-120,"elapsed":1,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.2 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 
0.85 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","#-----#\n","num=1\n","#import torch\n","#import Image\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, prompt_str: str) -> str:\n"," torch.cuda.empty_cache()\n"," length =512\n"," #length = None if caption_length == \"any\" else caption_length\n"," #if isinstance(length, str):\n"," # try:\n"," # length = int(length)\n"," # except ValueError:\n"," # pass\n"," #if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," # caption_tone = \"formal\"\n"," #prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," #if prompt_key not in CAPTION_TYPE_MAP:\n"," # raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," #prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","source":["\n","%cd /content/\n","!unzip 
training_data.zip\n","\n","\n","\n","\n","\n","\n","\n"],"metadata":{"id":"c60a6jW-YwsN"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"mhccTDyzirVn"},"outputs":[],"source":["# @markdown Split the image into 20 parts prior to running\n","no_parts = 20 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found under /content/... \\n into {no_parts} along x-axis')\n","import os,math,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","split_folder = f'/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","#num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," #while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," #os.remove(f'{filename}')\n"," #continue\n"," image = Image.open(f\"{filename}\").convert('RGB')\n"," w,h=image.size\n"," #grid = product(range(0, h-h%d, d), range(0, w-w%d, d))\n"," divs=no_parts\n"," step=math.floor(w/divs)\n"," %cd {split_folder}\n"," for index in range(divs):\n"," box = (step*index, 0 ,step*(index+1),math.floor(1.0*h))\n"," image.crop(box).save(f'{num}_{index}.jpeg','JPEG')\n"," %cd /content/\n"," if os.path.exists(textpath):\n"," with open(f'{textpath}', 'r') as file:\n"," _tags = file.read()\n","\n"," print(_tags)\n"," if not _tags:continue\n"," tags=''\n"," _tags = [item.strip() for item in f'{_tags}'.split(',')]\n"," random.shuffle(_tags)\n"," for tag in _tags:\n"," tags = tags + tag + ' , '\n"," #----#\n"," tags = (tags + 'AAAA').replace(' , AAAA','')\n"," prompt_str = f' {tags}'\n"," %cd {split_folder}\n"," f = open(f'{num}_{index}.txt','w')\n"," f.write(f'{prompt_str}')\n"," f.close()\n"," #---#\n"," #-----#\n"," #----#\n"," num = num+1\n"," #caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," #print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," #print(caption)\n"," #---------#\n"," #f = open(f\"{num}.txt\", \"w\")\n"," #f.write(f'{caption}')\n"," #f.close()\n"," #input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," if os.path.exists(f'{src_folder}{textpath}'):os.remove(f'{src_folder}{textpath}')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo"},"outputs":[],"source":["\n","import os,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","split_folder = '/content/split/'\n","src_folder = '/content/'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while 
os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," prompt_str = '< Summary of image > . < What is the subject? > . < What is the photo? > . < What are the aesthetics? > ' #@param {type:'string'}\n"," tags =''\n"," if os.path.exists(f'{src_folder}{textpath}'):\n"," with open(f'{textpath}', 'r') as file:\n"," tags = file.read()\n"," #prompt_str = f'Please improve this prompt : {tags}'\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, f'{prompt_str} {tags}')\n"," caption = caption + tags\n"," #caption = caption + \", and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\"\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," if os.path.exists(f'{src_folder}{textpath}'):os.remove(f'{src_folder}{textpath}')\n"," num = num+1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"5EztLCjkPq4U","colab":{"base_uri":"https://localhost:8080/","height":54},"executionInfo":{"status":"ok","timestamp":1753386278449,"user_tz":-120,"elapsed":258,"user":{"displayName":"","userId":""}},"outputId":"abeea3bf-833a-47ae-f7b8-1f093a214e50"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]},{"output_type":"execute_result","data":{"text/plain":["'/content/tmp.zip'"],"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"}},"metadata":{},"execution_count":23}],"source":["import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"]},{"cell_type":"code","source":["# @markdown Save images of all urls found in image_urls.txt to workspace\n","\n","!wget -i image_urls.txt -P ./splits\n","\n"],"metadata":{"id":"v9UMCh3h_mNj"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["# @markdown Auto-disconnect from Google Colab upon running this cell\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from 
runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1754464304573},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1754297479642},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753386997714},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753384460583},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753179095950},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753120703402},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
T5_encoding_test.ipynb ADDED
@@ -0,0 +1 @@
 
 
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/T5_encoding_test.ipynb","timestamp":1753784751931},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/T5_encoding_test.ipynb","timestamp":1753506570273}],"authorship_tag":"ABX9TyPrayIBQD6IWp9FEuabFuiO"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","source":["!pip install transformers"],"metadata":{"id":"Q2jmuaxxF4ev"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown Use the T5 encoder only\n","# Step 2: Import necessary libraries\n","from transformers import T5Tokenizer, T5Model\n","import torch\n","\n","# Step 3: Load the T5 tokenizer and model\n","# You can use 't5-small', 't5-base', 't5-large', etc. 't5-small' is lighter for Colab\n","tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n","model = T5Model.from_pretrained(\"t5-base\")\n","\n","# Step 4: Define the input string\n","input_string = \"Studies have shown that owning a dog is good for you\" # @param {type:'string'}\n","\n","# Step 5: Tokenize the input string to get token IDs\n","input_ids = tokenizer(input_string, return_tensors=\"pt\").input_ids\n","print(\"Token IDs:\", input_ids)\n","\n","# Step 6: (Optional) Get hidden state embeddings\n","# Ensure the model is in evaluation mode\n","model.eval()\n","\n","# Forward pass to get encoder outputs\n","with torch.no_grad():\n"," outputs = model.encoder(input_ids=input_ids)\n"," encoder_hidden_states = outputs.last_hidden_state\n","\n","# Print the shape of the hidden states\n","print(\"Encoder Hidden States Shape:\", encoder_hidden_states.shape)\n","# Example: Shape will be [batch_size, sequence_length, hidden_size], e.g., [1, num_tokens, 768] for t5-base\n","\n","# Step 7: (Optional) Decode token IDs back to text for verification\n","decoded_text = tokenizer.decode(input_ids[0], skip_special_tokens=True)\n","print(\"Decoded Text:\", decoded_text)"],"metadata":{"id":"jT1UmiK8_jHs"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown 🇫🇷 Translate using the T5 model <br>\n","# @markdown Note: NOT a FLUX feature since FLUX only uses the T5 encoder!\n","\n","# Step 2: Import necessary libraries\n","from transformers import T5Tokenizer, T5ForConditionalGeneration\n","\n","# Step 3: Load the T5 tokenizer and model\n","# Use 't5-base' for balance; 't5-small' for speed, or 't5-large' for better performance\n","tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n","model = T5ForConditionalGeneration.from_pretrained(\"t5-base\")\n","\n","# Step 4: Define the input string with the instruction\n","input_string = \"translate to French: The sun is shining today.\" # @param {type:'string'}\n","\n","# Step 5: Tokenize the input string\n","input_ids = tokenizer(input_string, return_tensors=\"pt\").input_ids\n","\n","# Step 6: Generate the output\n","model.eval()\n","with torch.no_grad():\n"," outputs = model.generate(input_ids, max_length=50)\n"," translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n","\n","# Step 7: Print the result\n","print(\"Translated Text:\", translated_text)\n","\n"],"metadata":{"id":"lovIkU-uDLPn"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown ⚖️ Compare Similiarity\n","\n","# Step 1: Install required libraries\n","!pip install transformers torch\n","\n","# Step 2: Import necessary 
libraries\n","from transformers import T5Tokenizer, T5Model\n","import torch\n","import torch.nn.functional as F\n","\n","# Step 3: Load T5 tokenizer and model\n","tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n","model = T5Model.from_pretrained(\"t5-base\")\n","\n","# Step 4: Define input strings\n","text1 = \"a photo The sun is shining today\" # @param {type:'string'}\n","text2 = \"anime screencap The sun is shining today \" # @param {type:'string'}\n","\n","# Step 5: Tokenize the input strings\n","inputs1 = tokenizer(text1, return_tensors=\"pt\", padding=True, truncation=True)\n","inputs2 = tokenizer(text2, return_tensors=\"pt\", padding=True, truncation=True)\n","\n","# Step 6: Get T5 encoder hidden states\n","model.eval()\n","with torch.no_grad():\n"," # Get encoder outputs for both inputs\n"," outputs1 = model.encoder(input_ids=inputs1.input_ids)\n"," outputs2 = model.encoder(input_ids=inputs2.input_ids)\n","\n"," # Extract last hidden states [batch_size, sequence_length, hidden_size]\n"," hidden_states1 = outputs1.last_hidden_state\n"," hidden_states2 = outputs2.last_hidden_state\n","\n","# Step 7: Aggregate hidden states (mean pooling)\n","# Average across the sequence dimension to get a single vector per input\n","embedding1 = hidden_states1.mean(dim=1) # Shape: [1, hidden_size]\n","embedding2 = hidden_states2.mean(dim=1) # Shape: [1, hidden_size]\n","\n","# Step 8: Compute cosine similarity\n","cosine_sim = F.cosine_similarity(embedding1, embedding2, dim=1)\n","print(\"Cosine Similarity:\", cosine_sim.item())\n","\n","# Step 9: (Optional) Print token IDs for reference\n","print(\"Token IDs for text1:\", inputs1.input_ids)\n","print(\"Token IDs for text2:\", inputs2.input_ids)"],"metadata":{"id":"XPymy3EwByMQ"},"execution_count":null,"outputs":[]}]}
Video to Keyframe Extractor.ipynb ADDED
@@ -0,0 +1 @@
 
 
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"wTbO9mWbDXNr"},"outputs":[],"source":["\n","# @markdown 🌊 Rapid keyframe processing\n","# @markdown <br> ------ <br> Extract Keyframes from ALL mp4 / webm videos found on Google Drive\n","# @markdown <br> Be mindful of Google Drive Terms of Service\n","# @markdown <br> This cell will process all mp4 videos found under\n","# @markdown <br> /content/drive/MyDrive/Saved from Chrome/\n","delete_mp4_when_done = True # @param {type:'boolean'}\n","# @markdown <br> deleted mp4/webm files will be found under 'trash' in your Google drive\n","# @markdown <br> -------\n","# @markdown <br> (Optional) Add a direct video link to below field.\n","# @markdown <br> Multiple links can be written in this field\n","# @markdown <br> separated by comma. Like this: <br> ' https:\\\\\\my_video.mp4 , https:\\\\\\second_video.webm , .... '\n","import os\n","import shutil\n","!pip install video-kf\n","!pip install ffmpeg-python\n","!pip install wget\n","!pip install moviepy\n","import wget\n","import videokf as vf\n","import time\n","proc_keyframes=True # @param {type:'boolean'}\n","proc_audio=False # @param {type:'boolean'}\n","#def mkdirs(folder):\n","# if not os.path.exists(folder):os.makedirs(folder)\n","#----#\n","direct_link = '' # @param {type:'string'}\n","# @markdown The linked videos will be downloaded to the Google drive prior to running the script.\n","# @markdown <br> This feature is useful for direct processing .webm from 4chan threads into keyframes\n","use_link = False # @param {type:'boolean'}\n","if direct_link.find('http')>-1: use_link = True\n","if use_link:\n"," %cd '/content/drive/MyDrive/Saved from Chrome/'\n"," for link in direct_link.split(','):\n"," if not link.find('http')>-1:continue\n"," wget.download(link.strip())\n"," time.sleep(5)\n"," %cd '/content/'\n","#-----#\n","filenames = []\n","srcpath = '/content/drive/MyDrive/Saved from Chrome/'\n","destpath = '/content/drive/MyDrive/'\n","localpath = '/content/'\n","converted = ''\n","for filename in os.listdir(f'{srcpath}'):\n"," if filename.find('.zip')>-1:\n"," %cd {srcpath}\n"," !unzip {filename}\n"," os.remove(filename)\n"," filename = filename.replace('.zip','')\n"," for suffix in ['.mp4','.webm']:\n"," if filename.find(f'{suffix}')>-1: filenames.append(filename)\n","#Rename the downloaded video to 'tgt0' before running this cell\n","def my_mkdirs(folder):\n"," if os.path.exists(folder):shutil.rmtree(folder)\n"," os.makedirs(folder)\n","#----#\n","# @markdown Write a funny name for the folder(s) containing the keyframes\n","name_keyframes_as='' # @param {type:'string'}\n","# @markdown Created .zip files will not be overwritten\n","#NUM_ITEMS = 1 # @param {type:'slider', min:1 , max:20,step:1}\n","if name_keyframes_as.strip()=='': name_keyframes_as='keyframes'\n","num = 0\n","savepath = ''\n","%cd {localpath}\n","for filename in filenames:\n"," tgt_folder = f'/content/tmp'\n"," my_mkdirs(f'{tgt_folder}')\n"," print(f'Now processing video {filename}...')\n"," if proc_keyframes:\n"," vf.extract_keyframes(f'{srcpath}{filename}',output_dir_keyframes=f'{tgt_folder}')\n"," savepath = f'{destpath}{name_keyframes_as}_v{num}_kf'\n"," #---#\n"," while os.path.exists(f'{savepath}.zip'):\n"," #print(f'{savepath}.zip already exists...')\n"," num = num+1\n"," savepath = 
f'{destpath}{name_keyframes_as}_v{num}_kf'\n"," #---#\n"," shutil.make_archive(savepath,'zip' , f'{tgt_folder}')\n"," #from moviepy.editor import VideoFileClip\n"," if proc_audio:\n"," from moviepy.editor import VideoFileClip\n"," # Load the WebM file\n"," video = VideoFileClip(f\"{srcpath}{filename}\")\n","\n"," # Extract audio and save as MP3 (or WAV, etc.)\n"," audio = video.audio\n"," savepath = f\"{destpath}_audio_v{num}.mp3\"\n","\n"," while os.path.exists(savepath):\n"," num = num+1\n"," savepath= f\"{destpath}_audio_v{num}.mp3\"\n"," #----#\n"," if audio:\n"," audio.write_audiofile(f'{savepath}')\n"," # Close the files to free resources\n"," audio.close()\n"," video.close()\n"," #----#\n"," if delete_mp4_when_done: os.remove(f'{srcpath}{filename}')\n"," num = num+1\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"Srb6DaP8-3d-","outputId":"5056b7ff-0020-4da7-b069-aa728da9a310","colab":{"base_uri":"https://localhost:8080/"}},"outputs":[{"output_type":"stream","name":"stderr","text":["t: 4%|▍ | 239/5695 [02:04<48:48, 1.86it/s, now=None]"]}],"source":["# @markdown 📮 Convert .mkv video to mp4 w/o audio at 0.5 fps <br>\n","# @markdown This script will process all .mkv files <br> found under .../Saved from Chrome/ in the drive folder\n","import moviepy.editor as moviepy\n","import os\n","import shutil\n","# @markdown higher fps to extract => longer processing time\n","# @markdown <br> Processing time at 1 fps is roughly 1:1 of the actual length of the video\n","# @markdown <br> For example; a 60 min video will take 60 min to convert to mp4 at 1 fps\n","frames_per_second = 1 # @param {type:'slider',min:0.1 , max:4 , step:0.1}\n","#src = '/content/drive/MyDrive/Saved from Chrome/ob2.mkv' # @param {type:'string'}\n","#----#\n","drivepath = '/content/drive/MyDrive/Saved from Chrome/'\n","for src in os.listdir(f'{drivepath}'):\n"," if not src.find('.mkv')>-1: continue\n"," dest = src.replace('.mkv','.mp4')\n"," if os.path.exists(f'{dest}'):continue\n"," clip = moviepy.VideoFileClip(f\"{drivepath}{src}\")\n"," clip.write_videofile(f\"{drivepath}{dest}\", audio=False, fps=frames_per_second)\n","#----#\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"DlwPZ1pMyp9t"},"outputs":[],"source":["\n","# @markdown Extract Keyframes from video uploaded in '/content/'\n","import os\n","import shutil\n","!pip install video-kf\n","#Rename the downloaded video to 'tgt' before running this cell\n","def my_mkdirs(folder):\n"," if os.path.exists(folder):shutil.rmtree(folder)\n"," os.makedirs(folder)\n","#----#\n","num=0\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","!video-kf '/content/tgt' -dir '/content/' -o '/content/tmp/'\n","shutil.make_archive('/content/images'+f'{num}' ,'zip' , f'{tgt_folder}')\n","num = num+1\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"hNbVdLKcGluC"},"outputs":[],"source":["# @markdown Extract Keyframes from video stored on Google Drive\n","# @markdown <br> Be mindful of Google Drive Terms of Service\n","import os\n","import shutil\n","!pip install video-kf\n","#Rename the downloaded video to 'tgt' before running this cell\n","def my_mkdirs(folder):\n"," if os.path.exists(folder):shutil.rmtree(folder)\n"," os.makedirs(folder)\n","#----#\n","num=0\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","!video-kf '/content/drive/MyDrive/Saved from Chrome/tgt' -dir '/content/' -o '/content/tmp/'\n","shutil.make_archive('/content/drive/MyDrive/images'+f'{num}' ,'zip' , f'{tgt_folder}')\n","num = 
num+1\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"bk4rg4zhCqZv"},"outputs":[],"source":["# @markdown .zip and upload keyframes on Google Drive\n","# @markdown <br> Be mindful of Google Drive Terms of Service\n","shutil.make_archive('/content/drive/MyDrive/images'+f'{num}' ,'zip' , f'{tgt_folder}')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo"},"outputs":[],"source":["\n","# @markdown Convert all uploaded images to '/content/'\n","# @markdown <br> to the more compact .webp format\n","# @markdown and place them in the '/content/tmp' folder\n","import os\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","#----#\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","#----#\n","tgt_folder = f'/content/tmp/'\n","src_folder = '/content/' # @param\n","my_mkdirs(f'{tgt_folder}')\n","#---#\n","suffixes = ['.jpg','.gif', '.jpeg','.WEBP','.PNG','.png', '.JPEG' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," print(filename)\n"," %cd {src_folder}\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," %cd {tgt_folder}\n"," input_image.save(f'{num}.webp', \"WEBP\")\n"," num = num+1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"5EztLCjkPq4U"},"outputs":[],"source":["# @markdown Make an archive by .zip the files in 'content/tmp'\n","#import shutil\n","#%cd /content/\n","#shutil.make_archive('/content/images' ,'zip', '/content/tmp')\n","#----#\n","shutil.rmtree('/content/drive/MyDrive/Saved from Chrome/tgt') #delete video\n","from google.colab import runtime\n","runtime.unassign() #Disconnect from runtime\n"]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Video to Keyframe Extractor.ipynb","timestamp":1752976495385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Video to Keyframe Extractor.ipynb","timestamp":1752782602276},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Video to Keyframe Extractor.ipynb","timestamp":1748371395582},{"file_id":"1G-vpB2y3uKbM4BuNg5N_1kQnb6mrxq-A","timestamp":1747499769982},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1741088234107},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
YT-playlist-to-mp3.ipynb ADDED
@@ -0,0 +1 @@
 
 
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1747490904984},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1740037333374},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1736477078136},{"file_id":"https://huggingface.co/codeShare/JupyterNotebooks/blob/main/YT-playlist-to-mp3.ipynb","timestamp":1725365086834}]},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"markdown","source":["This Notebook will take a Youtube Playlist and convert all videos to MP3:s , which will be stored on a folder on your Google Drive."],"metadata":{"id":"I64oSgGJxki5"}},{"cell_type":"code","execution_count":1,"metadata":{"id":"KXsmL_npl5Zf","executionInfo":{"status":"ok","timestamp":1747489455031,"user_tz":-120,"elapsed":9,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["#Initialize\n","import os\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","my_mkdirs('/content/tmp/')"]},{"cell_type":"code","source":["#NOTE: you need to download cookies from youtube to your drive folder because of recent Youtubr BS restrictions\n","#Reinstall youtube_dl because the version on Colab is outdated\n","!python3 -m pip install --force-reinstall https://github.com/yt-dlp/yt-dlp/archive/master.tar.gz\n","import yt_dlp as youtube_dl\n"],"metadata":{"id":"CT8O2CJYl-Cb"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#Mount Google Drive\n","from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"vxae5FCml-0A"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown download video or playlist as audio file mp3\n","youtube_link = '' # @param {type:'string'}\n","playlist_start = 1\n","## @param {type:'number'}\n","playlist_end = 9999\n","## @param {type:'number'}\n","\n","#Extract all videos in YT playlist mp3 files\n","#Aborting this code is fine if list is latge ( You will keep downloaded mp3:s)\n","%cd /content/tmp\n","for playlist_URL in youtube_link.split(','):\n"," !yt-dlp --cookies /content/drive/MyDrive/ytcookies.txt --playlist-end {playlist_end} --playlist-start {playlist_start} --extract-audio --audio-format mp3 -o \"%(title)s.%(ext)s\" {playlist_URL}"],"metadata":{"id":"0K9n3HM6l-7x"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["\n","# @markdown Download video or playlist as highest video quality mp4\n","youtube_link = '' # @param {type:'string'}\n","playlist_start = 1\n","## @param {type:'number'}\n","playlist_end = 9999\n","## @param {type:'number'}\n","\n","#Extract all videos in YT playlist mp3 files\n","#Aborting this code is fine if list is latge ( You will keep downloaded mp3:s)\n","%cd /content/drive/MyDrive/Saved from Chrome/\n","for playlist_URL in youtube_link.split(','):\n"," !yt-dlp --cookies /content/drive/MyDrive/ytcookies.txt --playlist-end {playlist_end} --playlist-start {playlist_start} --merge-output-format mp4 -f \"bestvideo+bestaudio[ext=m4a]/best\" -o \"%(title)s.%(ext)s\" {playlist_URL}"],"metadata":{"id":"EWZ4sO7NfdA6"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["\n","drive_folder_name = 'my_zip_folder' # @param {type:'string'}\n","\n","%cd /content/\n","!zip -r /content/drive/MyDrive/{drive_folder_name}.zip 
/content/tmp"],"metadata":{"id":"D04FssOTma-2"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["from google.colab import runtime\n","#runtime.unassign()\n","\n","\n"],"metadata":{"id":"1JlaBNIKODCT"},"execution_count":null,"outputs":[]}]}