{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/T5_encoding_test.ipynb","timestamp":1753784751931},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/T5_encoding_test.ipynb","timestamp":1753506570273}],"authorship_tag":"ABX9TyPrayIBQD6IWp9FEuabFuiO"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","source":["!pip install transformers"],"metadata":{"id":"Q2jmuaxxF4ev"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown Use the T5 encoder only\n","# Step 2: Import necessary libraries\n","from transformers import T5Tokenizer, T5Model\n","import torch\n","\n","# Step 3: Load the T5 tokenizer and model\n","# You can use 't5-small', 't5-base', 't5-large', etc. 't5-small' is lighter for Colab\n","tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n","model = T5Model.from_pretrained(\"t5-base\")\n","\n","# Step 4: Define the input string\n","input_string = \"Studies have shown that owning a dog is good for you\" # @param {type:'string'}\n","\n","# Step 5: Tokenize the input string to get token IDs\n","input_ids = tokenizer(input_string, return_tensors=\"pt\").input_ids\n","print(\"Token IDs:\", input_ids)\n","\n","# Step 6: (Optional) Get hidden state embeddings\n","# Ensure the model is in evaluation mode\n","model.eval()\n","\n","# Forward pass to get encoder outputs\n","with torch.no_grad():\n"," outputs = model.encoder(input_ids=input_ids)\n"," encoder_hidden_states = outputs.last_hidden_state\n","\n","# Print the shape of the hidden states\n","print(\"Encoder Hidden States Shape:\", encoder_hidden_states.shape)\n","# Example: Shape will be [batch_size, sequence_length, hidden_size], e.g., [1, num_tokens, 768] for t5-base\n","\n","# Step 7: (Optional) Decode token IDs back to text for verification\n","decoded_text = tokenizer.decode(input_ids[0], skip_special_tokens=True)\n","print(\"Decoded Text:\", decoded_text)"],"metadata":{"id":"jT1UmiK8_jHs"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown 🇫🇷 Translate using the T5 model <br>\n","# @markdown Note: NOT a FLUX feature since FLUX only uses the T5 encoder!\n","\n","# Step 2: Import necessary libraries\n","from transformers import T5Tokenizer, T5ForConditionalGeneration\n","\n","# Step 3: Load the T5 tokenizer and model\n","# Use 't5-base' for balance; 't5-small' for speed, or 't5-large' for better performance\n","tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n","model = T5ForConditionalGeneration.from_pretrained(\"t5-base\")\n","\n","# Step 4: Define the input string with the instruction\n","input_string = \"translate to French: The sun is shining today.\" # @param {type:'string'}\n","\n","# Step 5: Tokenize the input string\n","input_ids = tokenizer(input_string, return_tensors=\"pt\").input_ids\n","\n","# Step 6: Generate the output\n","model.eval()\n","with torch.no_grad():\n"," outputs = model.generate(input_ids, max_length=50)\n"," translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n","\n","# Step 7: Print the result\n","print(\"Translated Text:\", translated_text)\n","\n"],"metadata":{"id":"lovIkU-uDLPn"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown ⚖️ Compare Similiarity\n","\n","# Step 1: Install required libraries\n","!pip install transformers torch\n","\n","# Step 2: Import necessary libraries\n","from 
# %%
# @markdown ⚖️ Compare Similarity

# Step 1: Install required libraries
!pip install transformers sentencepiece torch

# Step 2: Import necessary libraries
from transformers import T5Tokenizer, T5Model
import torch
import torch.nn.functional as F

# Step 3: Load T5 tokenizer and model
tokenizer = T5Tokenizer.from_pretrained("t5-base")
model = T5Model.from_pretrained("t5-base")

# Step 4: Define input strings
text1 = "a photo The sun is shining today" # @param {type:'string'}
text2 = "anime screencap The sun is shining today " # @param {type:'string'}

# Step 5: Tokenize the input strings
inputs1 = tokenizer(text1, return_tensors="pt", padding=True, truncation=True)
inputs2 = tokenizer(text2, return_tensors="pt", padding=True, truncation=True)

# Step 6: Get T5 encoder hidden states
model.eval()
with torch.no_grad():
    # Get encoder outputs for both inputs
    outputs1 = model.encoder(input_ids=inputs1.input_ids)
    outputs2 = model.encoder(input_ids=inputs2.input_ids)

    # Extract last hidden states [batch_size, sequence_length, hidden_size]
    hidden_states1 = outputs1.last_hidden_state
    hidden_states2 = outputs2.last_hidden_state

# Step 7: Aggregate hidden states (mean pooling)
# Average across the sequence dimension to get a single vector per input
embedding1 = hidden_states1.mean(dim=1)  # Shape: [1, hidden_size]
embedding2 = hidden_states2.mean(dim=1)  # Shape: [1, hidden_size]

# Step 8: Compute cosine similarity
cosine_sim = F.cosine_similarity(embedding1, embedding2, dim=1)
print("Cosine Similarity:", cosine_sim.item())

# Step 9: (Optional) Print token IDs for reference
print("Token IDs for text1:", inputs1.input_ids)
print("Token IDs for text2:", inputs2.input_ids)
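# %%
# @markdown 🧮 (Added sketch) Mask-aware mean pooling
# A minimal sketch, not part of the original notebook: the plain .mean(dim=1)
# above is fine for single unpadded sentences, but in a padded batch it would
# average pad-token hidden states into the embedding. This variant zeroes out
# pad positions using the attention mask before averaging, then compares the
# two example strings from the cell above in one batch.
from transformers import T5Tokenizer, T5Model
import torch
import torch.nn.functional as F

tokenizer = T5Tokenizer.from_pretrained("t5-base")
model = T5Model.from_pretrained("t5-base")
model.eval()

texts = ["a photo The sun is shining today",
         "anime screencap The sun is shining today"]
batch = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)

with torch.no_grad():
    hidden = model.encoder(
        input_ids=batch.input_ids,
        attention_mask=batch.attention_mask,
    ).last_hidden_state  # [2, seq_len, hidden_size]

# Expand the mask over the hidden dimension, zero out pads, divide by real lengths
mask = batch.attention_mask.unsqueeze(-1).float()          # [2, seq_len, 1]
embeddings = (hidden * mask).sum(dim=1) / mask.sum(dim=1)  # [2, hidden_size]

cosine_sim = F.cosine_similarity(embeddings[0:1], embeddings[1:2], dim=1)
print("Masked-mean Cosine Similarity:", cosine_sim.item())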