File size: 24,184 Bytes
cef1fa3 |
1 |
{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Dwr7gk5OwuGC"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1X-s_s971qB7"},"outputs":[],"source":["!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","!apt -y install -qq aria2\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/text_model/adapter_config.json -d /content/joy/text_model -o adapter_config.json\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/text_model/adapter_model.safetensors -d /content/joy/text_model -o adapter_model.safetensors\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/clip_model.pt -d /content/joy -o clip_model.pt\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/raw/main/config.yaml -d /content/joy -o config.yaml\n","!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/joy-caption-alpha-one/resolve/main/image_adapter.pt -d /content/joy -o image_adapter.pt\n","\n","# Use a custom prompt to instruct the image captioning model\n","custom_prompt = '' #{type:'string'}\n","enable_custom_prompt = False # {type:'boolean'}\n","if not enable_custom_prompt: custom_prompt = 'Describe the image in 400 words'\n","!pip install peft bitsandbytes\n","!pip install hf_xet\n","from huggingface_hub import InferenceClient\n","from torch import nn\n","from transformers import AutoModel, BitsAndBytesConfig, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM\n","import torch\n","import torch.amp.\n","from PIL import Image\n","import os\n","import torchvision.transforms.functional as TVF\n","\n","CLIP_PATH = \"google/siglip-so400m-patch14-384\"\n","MODEL_PATH = \"unsloth/Meta-Llama-3.1-8B-bnb-4bit\"\n","CAPTION_TYPE_MAP = {\n"," (\"descriptive\", \"formal\", False, False): [f\"{custom_prompt}\"],\n","}\n","\n","class ImageAdapter(nn.Module):\n","\tdef __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):\n","\t\tsuper().__init__()\n","\t\tself.deep_extract = deep_extract\n","\t\tif self.deep_extract:\n","\t\t\tinput_features = input_features * 5\n","\t\tself.linear1 = nn.Linear(input_features, output_features)\n","\t\tself.activation = nn.GELU()\n","\t\tself.linear2 = nn.Linear(output_features, output_features)\n","\t\tself.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)\n","\t\tself.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))\n","\t\tself.other_tokens = nn.Embedding(3, output_features)\n","\t\tself.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3\n","\tdef forward(self, vision_outputs: torch.Tensor):\n","\t\tif self.deep_extract:\n","\t\t\tx = torch.concat((\n","\t\t\t\tvision_outputs[-2],\n","\t\t\t\tvision_outputs[3],\n","\t\t\t\tvision_outputs[7],\n","\t\t\t\tvision_outputs[13],\n","\t\t\t\tvision_outputs[20],\n","\t\t\t), dim=-1)\n","\t\t\tassert len(x.shape) == 3, f\"Expected 3, got {len(x.shape)}\" # batch, tokens, features\n","\t\t\tassert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f\"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}\"\n","\t\telse:\n","\t\t\tx = vision_outputs[-2]\n","\t\tx = self.ln1(x)\n","\t\tif self.pos_emb is not None:\n","\t\t\tassert x.shape[-2:] == self.pos_emb.shape, f\"Expected {self.pos_emb.shape}, got {x.shape[-2:]}\"\n","\t\t\tx = x + self.pos_emb\n","\t\tx = self.linear1(x)\n","\t\tx = self.activation(x)\n","\t\tx = self.linear2(x)\n","\t\tother_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))\n","\t\tassert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f\"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}\"\n","\t\tx = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)\n","\t\treturn x\n","\tdef get_eot_embedding(self):\n","\t\treturn self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)\n","\n","clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)\n","clip_model = AutoModel.from_pretrained(CLIP_PATH)\n","clip_model = clip_model.vision_model\n","checkpoint = torch.load(\"/content/joy/clip_model.pt\", map_location='cpu')\n","checkpoint = {k.replace(\"_orig_mod.module.\", \"\"): v for k, v in checkpoint.items()}\n","clip_model.load_state_dict(checkpoint)\n","# del checkpoint\n","clip_model.eval()\n","clip_model.requires_grad_(False)\n","clip_model.to(\"cuda\")\n","tokenizer = AutoTokenizer.from_pretrained(f'{MODEL_PATH}')\n","#tokenizer = AutoTokenizer.from_pretrained(\"unsloth/Meta-Llama-3.1-8B-bnb-4bit\")\n","assert isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast), f\"Tokenizer is of type {type(tokenizer)}\"\n","text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config = BitsAndBytesConfig(load_in_8bit=True), device_map=\"auto\", torch_dtype=torch.bfloat16)\n","text_model.load_adapter(\"/content/joy/text_model\")\n","text_model.eval()\n","image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False)\n","image_adapter.load_state_dict(torch.load(\"/content/joy/image_adapter.pt\", map_location=\"cpu\"))\n","image_adapter.eval()\n","image_adapter.to(\"cuda\")"]},{"cell_type":"code","execution_count":20,"metadata":{"id":"PjO3Wc4kzR08","executionInfo":{"status":"ok","timestamp":1753385832611,"user_tz":-120,"elapsed":7,"user":{"displayName":"","userId":""}}},"outputs":[],"source":["# @markdown higher temperature = prompt creativity (default 0.6) <br> higher top_p = higher noise reduction in latent embedding (default 0.9)\n","temperature = 1.35 # @param {type:'slider',min:0.5,max:4.0,step:0.05}\n","top_p = 0.75 # @param {type:'slider',min:0.1,max:0.95,step:0.05}\n","temperature = float(temperature)\n","top_p = float(top_p)\n","#-----#\n","num=1\n","#import torch\n","#import Image\n","\n","@torch.no_grad()\n","def stream_chat(input_image: Image.Image, prompt_str: str) -> str:\n"," torch.cuda.empty_cache()\n"," length =512\n"," #length = None if caption_length == \"any\" else caption_length\n"," #if isinstance(length, str):\n"," # try:\n"," # length = int(length)\n"," # except ValueError:\n"," # pass\n"," #if caption_type == \"rng-tags\" or caption_type == \"training_prompt\":\n"," # caption_tone = \"formal\"\n"," #prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))\n"," #if prompt_key not in CAPTION_TYPE_MAP:\n"," # raise ValueError(f\"Invalid caption type: {prompt_key}\")\n"," #prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)\n"," print(f\"Prompt: {prompt_str}\")\n"," image = input_image.resize((384, 384), Image.LANCZOS)\n"," pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0\n"," pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])\n"," pixel_values = pixel_values.to('cuda')\n"," prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)\n"," with torch.amp.autocast_mode.autocast('cuda', enabled=True):\n"," vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)\n"," image_features = vision_outputs.hidden_states\n"," embedded_images = image_adapter(image_features)\n"," embedded_images = embedded_images.to('cuda')\n"," prompt_embeds = text_model.model.embed_tokens(prompt.to('cuda'))\n"," assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f\"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}\"\n"," embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))\n"," eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)\n"," inputs_embeds = torch.cat([\n"," embedded_bos.expand(embedded_images.shape[0], -1, -1),\n"," embedded_images.to(dtype=embedded_bos.dtype),\n"," prompt_embeds.expand(embedded_images.shape[0], -1, -1),\n"," eot_embed.expand(embedded_images.shape[0], -1, -1),\n"," ], dim=1)\n"," input_ids = torch.cat([\n"," torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),\n"," torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),\n"," prompt,\n"," torch.tensor([[tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")]], dtype=torch.long),\n"," ], dim=1).to('cuda')\n"," attention_mask = torch.ones_like(input_ids)\n"," generate_ids = text_model.generate(input_ids, top_p = top_p , temperature=temperature, inputs_embeds=inputs_embeds, attention_mask=attention_mask, max_new_tokens=3000, do_sample=True, suppress_tokens=None) # Uses the default which is temp=0.6, top_p=0.9\n"," generate_ids = generate_ids[:, input_ids.shape[1]:]\n"," if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids(\"<|eot_id|>\"):\n"," generate_ids = generate_ids[:, :-1]\n"," caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]\n"," caption = f'{caption.strip()}'.replace('Prompt: Describe the image in 400 words','')\n"," return caption"]},{"cell_type":"code","source":["\n","%cd /content/\n","!unzip training_data.zip\n","\n","\n","\n","\n","\n","\n","\n"],"metadata":{"id":"c60a6jW-YwsN","executionInfo":{"status":"ok","timestamp":1753385771906,"user_tz":-120,"elapsed":113,"user":{"displayName":"","userId":""}},"outputId":"5b20e305-bf5d-49be-8d7b-66f0a747b092","colab":{"base_uri":"https://localhost:8080/"}},"execution_count":18,"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n","Archive: training_data.zip\n"," extracting: 000.txt \n"," extracting: 001.txt \n"," extracting: 002.txt \n"," extracting: 003.txt \n"," extracting: 004.txt \n"," extracting: 005.txt \n"," extracting: 006.txt \n"," extracting: 007.txt \n"," extracting: 008.txt \n"," extracting: 009.txt \n"," extracting: 010.txt \n"," extracting: 011.txt \n"," extracting: 012.txt \n"," extracting: 013.txt \n"," extracting: 014.txt \n"," extracting: 015.txt \n"," extracting: 016.txt \n"," extracting: 017.txt \n"," extracting: 018.txt \n"," extracting: 019.txt \n"," extracting: 020.txt \n"," extracting: 021.txt \n"," extracting: 022.txt \n"," extracting: 023.txt \n"," extracting: 024.txt \n"," extracting: 025.txt \n"," extracting: 026.txt \n"," extracting: 027.txt \n"," extracting: 028.txt \n"," extracting: 029.txt \n"," extracting: 030.txt \n"," extracting: 000.jpg \n"," extracting: 001.jpg \n"," extracting: 002.jpg \n"," extracting: 003.jpg \n"," extracting: 004.jpg \n"," extracting: 005.jpg \n"," extracting: 006.jpg \n"," extracting: 007.jpg \n"," extracting: 008.jpg \n"," extracting: 009.jpg \n"," extracting: 010.jpg \n"," extracting: 011.jpg \n"," extracting: 012.jpg \n"," extracting: 013.jpg \n"," extracting: 014.jpg \n"," extracting: 015.jpg \n"," extracting: 016.jpg \n"," extracting: 017.jpg \n"," extracting: 018.jpg \n"," extracting: 019.jpg \n"," extracting: 020.jpg \n"," extracting: 021.jpg \n"," extracting: 022.jpg \n"," extracting: 023.jpg \n"," extracting: 024.jpg \n"," extracting: 025.jpg \n"," extracting: 026.jpg \n"," extracting: 027.jpg \n"," extracting: 028.jpg \n"," extracting: 029.jpg \n"," extracting: 030.jpg \n"]}]},{"cell_type":"code","execution_count":21,"metadata":{"id":"mhccTDyzirVn","executionInfo":{"status":"ok","timestamp":1753385856689,"user_tz":-120,"elapsed":28,"user":{"displayName":"","userId":""}},"outputId":"b3b3bdb6-8d6d-4210-b70b-09c0554e4519","colab":{"base_uri":"https://localhost:8080/"}},"outputs":[{"output_type":"stream","name":"stdout","text":["Splitting all images found under /content/... \n"," into 1 along x-axis\n","/content\n"]}],"source":["# @markdown Split the image into 20 parts prior to running\n","no_parts = 1 # @param {type:'slider', min:1,max:30,step:1}\n","print(f'Splitting all images found under /content/... \\n into {no_parts} along x-axis')\n","import os,math,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","split_folder = f'/content/split/'\n","my_mkdirs(f'{split_folder}')\n","\n","\n","src_folder = '/content/'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","#num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," #while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," #os.remove(f'{filename}')\n"," #continue\n"," image = Image.open(f\"{filename}\").convert('RGB')\n"," w,h=image.size\n"," #grid = product(range(0, h-h%d, d), range(0, w-w%d, d))\n"," divs=no_parts\n"," step=math.floor(w/divs)\n"," %cd {split_folder}\n"," for index in range(divs):\n"," box = (step*index, 0 ,step*(index+1),math.floor(1.0*h))\n"," image.crop(box).save(f'{num}_{index}.jpeg','JPEG')\n"," %cd /content/\n"," if os.path.exists(textpath):\n"," with open(f'{textpath}', 'r') as file:\n"," _tags = file.read()\n","\n"," print(_tags)\n"," if not _tags:continue\n"," tags=''\n"," _tags = [item.strip() for item in f'{_tags}'.split(',')]\n"," random.shuffle(_tags)\n"," for tag in _tags:\n"," tags = tags + tag + ' , '\n"," #----#\n"," tags = (tags + 'AAAA').replace(' , AAAA','')\n"," prompt_str = f' {tags}'\n"," %cd {split_folder}\n"," f = open(f'{num}_{index}.txt','w')\n"," f.write(f'{prompt_str}')\n"," f.close()\n"," #---#\n"," #-----#\n"," #----#\n"," num = num+1\n"," #caption = stream_chat(input_image, \"descriptive\", \"formal\", \"any\")\n"," #print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," #print(caption)\n"," #---------#\n"," #f = open(f\"{num}.txt\", \"w\")\n"," #f.write(f'{caption}')\n"," #f.close()\n"," #input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"J811UZU6xZEo","colab":{"base_uri":"https://localhost:8080/"},"outputId":"5f92aa26-3f5f-47b3-df45-4059922dbebb"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n","20_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text tiles , male focus , black hair , solo focus , long sleeves , hands on own head , parody , black pants , kneeling , multiple boys , vest , meme , subtitled , white shirt , black vest , english text , fake screenshot , tile floor , pants , shirt\n","...\n","\n","...caption for 20_0.jpeg\n","\n","...\n","A shot from an anime of two men, one in the background who is seated, the other man is kneeling with his back to the camera and head in his hands. The men are wearing white shirts and dark vests. They have dark, short hair. In the center of the image is \"You're so fucking nuts!\" written in English with an arrow pointing to the kneeling man. tiles , male focus , black hair , solo focus , long sleeves , hands on own head , parody , black pants , kneeling , multiple boys , vest , meme , subtitled , white shirt , black vest , english text , fake screenshot , tile floor , pants , shirt\n","/content/tmp\n","22_0.jpeg\n","/content/split\n","Prompt: Describe the colors in the anime screencap and the subtitles text fur trim , indoors , black hair , 1boy , male focus , holding , earrings , solo , short hair , open book , dark , sitting , jewelry , book , candle , reading , letterboxed\n"]}],"source":["\n","import os,random\n","from PIL import Image\n","home_directory = '/content/'\n","using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n","if using_Kaggle : home_directory = '/kaggle/working/'\n","%cd {home_directory}\n","\n","def my_mkdirs(folder):\n"," if os.path.exists(folder)==False:\n"," os.makedirs(folder)\n","\n","\n","tgt_folder = f'/content/tmp/'\n","my_mkdirs(f'{tgt_folder}')\n","split_folder = '/content/split/'\n","src_folder = '/content/'\n","if os.path.exists(f'{split_folder}'): src_folder = f'{split_folder}'\n","suffixes = ['.gif','.png', '.jpeg' , '.webp' , '.jpg']\n","num = 1\n","for filename in os.listdir(src_folder):\n"," for suffix in suffixes:\n"," if not filename.find(suffix)>-1: continue\n"," while os.path.exists(f'{tgt_folder}{num}.txt'):num = num+1\n"," print(filename)\n"," %cd {src_folder}\n"," textpath = filename.replace(suffix,'.txt')\n"," prompt_str = 'Describe the colors in the anime screencap and the subtitles text '\n"," if os.path.exists(f'{src_folder}{textpath}'):\n"," with open(f'{textpath}', 'r') as file:\n"," tags = file.read()\n"," #prompt_str = f'Please improve this prompt : {tags}'\n"," input_image = Image.open(f\"{filename}\").convert('RGB')\n"," caption = stream_chat(input_image, f'{prompt_str} {tags}')\n"," caption = caption + tags\n"," #caption = caption + \", and a logo of a black bar with rainbow glitch art outline that says 'FLUX Chroma V46' psychadelic art\"\n"," print(f\"...\\n\\n...caption for {filename}\\n\\n...\")\n"," print(caption)\n"," #---------#\n"," %cd {tgt_folder}\n"," f = open(f\"{num}.txt\", \"w\")\n"," f.write(f'{caption}')\n"," f.close()\n"," input_image.save(f'{num}.jpeg', \"JPEG\")\n"," os.remove(f\"{src_folder}{filename}\")\n"," os.remove(f'{src_folder}{textpath}')\n"," num = num+1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"5EztLCjkPq4U","colab":{"base_uri":"https://localhost:8080/","height":54},"executionInfo":{"status":"ok","timestamp":1753383024100,"user_tz":-120,"elapsed":131,"user":{"displayName":"","userId":""}},"outputId":"d0ac4f71-1b7d-46a6-b511-68287cb6c5dc"},"outputs":[{"output_type":"stream","name":"stdout","text":["/content\n"]},{"output_type":"execute_result","data":{"text/plain":["'/content/tmp.zip'"],"application/vnd.google.colaboratory.intrinsic+json":{"type":"string"}},"metadata":{},"execution_count":6}],"source":["import shutil\n","%cd /content/\n","shutil.make_archive('/content/tmp', 'zip', '/content/tmp')"]},{"cell_type":"code","source":["# @markdown Save images of all urls found in image_urls.txt to workspace\n","\n","!wget -i image_urls.txt -P ./splits\n","\n"],"metadata":{"id":"v9UMCh3h_mNj"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"kM4TpfdB1amt"},"outputs":[],"source":["# @markdown Auto-disconnect from Google Colab upon running this cell\n","from google.colab import runtime\n","#runtime.unassign() #Disconnect from runtime"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753385887795},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753384460583},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753179095950},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1753120703402},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752593897385},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1752405756026},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1748859170548},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747227021653},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747225778912},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1747224652750},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746209168116},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1746181687155},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1742303655056},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740768524003},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1740657473013},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739796923572},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Joycaption_Alpha_One.ipynb","timestamp":1739735627072}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} |