{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/JupyterNotebooks/blob/main/TA_image_to_dataset.ipynb","timestamp":1755089885354}],"authorship_tag":"ABX9TyPQiAaqDC2y+D1y9PUmRtm8"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","source":["from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"lxcthG7VekSc"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# Step 1: Install required libraries and exiftool\n","!pip install Pillow imageio[ffmpeg] datasets pandas\n","!apt-get update && apt-get install -y libimage-exiftool-perl\n","\n","# Step 2: Import required libraries\n","import os\n","import glob\n","import subprocess\n","from PIL import Image\n","import imageio.v3 as iio\n","import pandas as pd\n","from datasets import Dataset, Features, Image as HFImage, Value\n","from google.colab import drive"],"metadata":{"id":"9XdSKlrXfXw7"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# Step 3: Mount Google Drive\n","drive.mount('/content/drive')\n","output_dir = '/content/drive/My Drive/exif_dataset' #@param {type:'string'}\n","\n","# Step 4: Define function to extract metadata using exiftool\n","def get_exif_data(image_path):\n","    try:\n","        # Run exiftool to extract all metadata as JSON\n","        result = subprocess.run(\n","            ['exiftool', '-j', image_path],\n","            stdout=subprocess.PIPE,\n","            stderr=subprocess.PIPE,\n","            text=True,\n","            check=True\n","        )\n","        # Parse JSON output (exiftool -j returns a list of dictionaries)\n","        metadata = eval(result.stdout)[0]  # First item in the list\n","        return metadata\n","    except subprocess.CalledProcessError as e:\n","        print(f\"exiftool error for {image_path}: {e.stderr}\")\n","        return {\"Error\": f\"exiftool failed: {str(e)}\"}\n","    except Exception as e:\n","        return {\"Error\": f\"Failed to read metadata: {str(e)}\"}\n","\n","# Step 5: Define function to convert image to WebM\n","def convert_to_webm(image_path, output_path):\n","    try:\n","        img = iio.imread(image_path)\n","        iio.imwrite(output_path, img, codec='vp8', fps=1, quality=8)\n","        return True\n","    except Exception as e:\n","        print(f\"Error converting {image_path} to WebM: {str(e)}\")\n","        return False\n","\n","# Step 6: Collect ALL images from /content/\n","image_dir = \"/content/\"\n","image_extensions = [\"*.jpg\", \"*.jpeg\", \"*.png\"]\n","image_paths = []\n","for ext in image_extensions:\n","    image_paths.extend(glob.glob(os.path.join(image_dir, ext)))\n","\n","if not image_paths:\n","    print(\"No images found in /content/\")\n","else:\n","    # Step 7: Process all images to collect metadata keys and data\n","    images = []\n","    webm_paths = []\n","    metadata_list = []\n","    all_metadata_keys = set()\n","\n","    for img_path in image_paths:\n","        print(f\"\\nProcessing {img_path}:\")\n","\n","        # Load image\n","        try:\n","            img = Image.open(img_path).convert('RGB')\n","        except Exception as e:\n","            print(f\"Error loading image {img_path}: {str(e)}\")\n","            continue\n","\n","        # Extract metadata with exiftool\n","        metadata = get_exif_data(img_path)\n","        print(\"Metadata (via exiftool):\")\n","        for key, value in 
metadata.items():\n","            print(f\"  {key}: {value}\")\n","            all_metadata_keys.add(key)  # Collect unique metadata keys\n","\n","        # Convert to WebM\n","        webm_path = os.path.splitext(img_path)[0] + \".webm\"\n","        if convert_to_webm(img_path, webm_path):\n","            print(f\"  Saved WebM: {webm_path}\")\n","            images.append(img)\n","            webm_paths.append(webm_path)\n","            metadata_list.append(metadata)\n","        else:\n","            print(f\"  Skipped WebM conversion for {img_path}\")\n","            continue\n","\n","    # Step 8: Check if any images were processed\n","    if not images:\n","        print(\"No images were successfully processed.\")\n","    else:\n","        # Step 9: Prepare dataset dictionary with separate columns for each metadata key\n","        data_dict = {'image': images, 'webm_path': webm_paths}\n","\n","        # Initialize columns for each metadata key with None\n","        for key in all_metadata_keys:\n","            data_dict[key] = [None] * len(images)\n","\n","        # Populate metadata values\n","        for i, metadata in enumerate(metadata_list):\n","            for key, value in metadata.items():\n","                data_dict[key][i] = str(value)  # Convert values to strings\n","\n","        # Step 10: Define dataset features\n","        features = Features({\n","            'image': HFImage(),\n","            'webm_path': Value(\"string\"),\n","            **{key: Value(\"string\") for key in all_metadata_keys}  # Dynamic columns for metadata keys\n","        })\n","\n","        # Step 11: Create Hugging Face Dataset\n","        dataset = Dataset.from_dict(data_dict, features=features)\n","\n","        # Step 12: Verify the dataset\n","        print(\"\\nDataset Summary:\")\n","        print(dataset)\n","        if len(dataset) > 0:\n","            print(\"\\nExample of accessing first item:\")\n","            print(\"WebM Path:\", dataset['webm_path'][0])\n","            print(\"Image type:\", type(dataset['image'][0]))\n","            print(\"Image size:\", dataset['image'][0].size)\n","            print(\"Metadata columns (first item):\")\n","            for key in all_metadata_keys:\n","                if dataset[key][0] is not None:\n","                    print(f\"  {key}: {dataset[key][0]}\")\n","\n","        # Step 13: Save dataset to Google Drive\n","        try:\n","            os.makedirs(output_dir, exist_ok=True)\n","            dataset.save_to_disk(output_dir)\n","            print(f\"\\nDataset saved to {output_dir}\")\n","        except Exception as e:\n","            print(f\"Error saving dataset to Google Drive: {str(e)}\")"],"metadata":{"id":"FAbdolPVf5my"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown Create a new dataset with 'image' and 'text' from the original dataset\n","\n","\n","# Step 14: Import required libraries\n","from datasets import Dataset, Features, load_from_disk, Image as HFImage, Value\n","import json\n","from PIL import Image\n","import io\n","\n","# Step 15: Define the path to the original dataset on Google Drive\n","dataset_path = '/content/drive/MyDrive/exif_dataset'  #@param {type:'string'}\n","\n","# Step 16: Load the original dataset\n","try:\n","    dataset = load_from_disk(dataset_path)\n","    print(\"Original dataset loaded successfully!\")\n","except Exception as e:\n","    print(f\"Error loading dataset: {e}\")\n","    raise\n","\n","# Step 17: Function to extract 'text' from the 'Prompt' dictionary\n","def 
extract_text_from_prompt(prompt):\n","    try:\n","        # Parse the prompt (assuming it's a string representation of a dictionary)\n","        # Safely handle cases where prompt might not be a string or valid JSON\n","        if isinstance(prompt, str):\n","            try:\n","                prompt_dict = json.loads(prompt)\n","            except json.JSONDecodeError:\n","                # Handle cases where the string is not valid JSON\n","                print(f\"Warning: Could not parse JSON from prompt string: {prompt[:100]}...\") # Print a snippet\n","                return \"\"\n","        elif isinstance(prompt, dict):\n","            prompt_dict = prompt\n","        else:\n","            # Handle cases where prompt is not a string or dict\n","            print(f\"Warning: Unexpected prompt type: {type(prompt)}\")\n","            return \"\"\n","\n","        # Look for the 'CLIPTextEncode' node with the main text description\n","        for node_key, node_data in prompt_dict.items():\n","            if isinstance(node_data, dict) and node_data.get('class_type') == 'CLIPTextEncode' and 'inputs' in node_data and 'text' in node_data['inputs']:\n","                return str(node_data['inputs']['text']) # Ensure text is a string\n","        return \"\"  # Return empty string if no valid text is found\n","    except Exception as e:\n","        print(f\"Error processing prompt: {e}\")\n","        return \"\"\n","\n","# Step 18: Create lists for the new dataset\n","new_data = {\n","    'image': [],\n","    'text': []\n","}\n","\n","# Step 19: Process each item in the dataset\n","print(f\"Processing {len(dataset)} items...\")\n","for i in range(len(dataset)):\n","    try:\n","        # Get the image and Prompt field\n","        image = dataset['image'][i]\n","        prompt = dataset['Prompt'][i]\n","\n","        # Extract the text from Prompt\n","        text = extract_text_from_prompt(prompt)\n","\n","        # Check if text is empty or contains no letters (a-z)\n","        if not text.strip():  # Skip if text is empty or only whitespace\n","            print(f\"Skipping item at index {i}: Empty text\")\n","            continue\n","        if not any(c.isalpha() for c in text.lower()):  # Skip if no letters a-z\n","            print(f\"Skipping item at index {i}: No letters in text: {text[:50]}...\")\n","            continue\n","\n","        # Convert PIL Image to bytes\n","        img_byte_arr = io.BytesIO()\n","        image.save(img_byte_arr, format='PNG')  # Use a common format like PNG\n","        img_bytes = img_byte_arr.getvalue()\n","\n","        new_data['image'].append(img_bytes)\n","        new_data['text'].append(text)\n","\n","    except Exception as e:\n","        print(f\"Error processing item at index {i}: {e}\")\n","        continue  # Skip this item and continue with the next\n","\n","# Step 20: Define dataset features with Image type\n","features = Features({\n","    'image': HFImage(),\n","    'text': Value(\"string\")\n","})\n","\n","# Step 21: Create a new Hugging Face dataset from the byte data\n","new_dataset = Dataset.from_dict(new_data, features=features)\n","\n","# Step 22: Define the path to save the new dataset\n","new_dataset_path = '/content/drive/MyDrive/to_add_dataset'  #@param {type:'string'}\n","\n","# Step 23: Save the new dataset\n","try:\n","    # Ensure the directory exists\n","    import os\n","    os.makedirs(new_dataset_path, exist_ok=True)\n","    new_dataset.save_to_disk(new_dataset_path)\n","    print(f\"New dataset saved successfully to 
{new_dataset_path}!\")\n","except Exception as e:\n","    print(f\"Error saving new dataset: {e}\")\n","    raise\n","\n","# Step 24: Verify the new dataset\n","print(\"\\nNew dataset info:\")\n","print(new_dataset)\n","\n","# Step 25: Example of accessing an item in the new dataset\n","index_to_test = 0  #@param {type:'slider', max:200}\n","index = index_to_test\n","if index < len(new_dataset):\n","    print(\"\\nExample of accessing item at index\", index)\n","    print(\"Text:\", new_dataset['text'][index])\n","    # When accessing, the Image feature automatically loads the image bytes back into a PIL Image\n","    print(\"Image type:\", type(new_dataset['image'][index]))\n","    print(\"Image size:\", new_dataset['image'][index].size)\n","\n","    # Optional: Display the image\n","    display(new_dataset['image'][index])\n","else:\n","    print(f\"\\nIndex {index} is out of bounds for the new dataset (size {len(new_dataset)}).\")"],"metadata":{"id":"dEKJP11Z8gI5"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown Merge the two datasets into one\n","\n","# Step 1: Import required libraries\n","from datasets import load_from_disk, concatenate_datasets\n","from google.colab import drive\n","\n","# Step 2: Mount Google Drive (only needed in Google Colab)\n","drive.mount('/content/drive')\n","\n","# Step 3: Define paths for the datasets\n","dataset1_path = '/content/drive/MyDrive/my_datasetv2' #@param {type:'string'}\n","dataset2_path = '/content/drive/MyDrive/to_add_dataset' #@param {type:'string'}\n","merged_dataset_path = '/content/drive/MyDrive/my_datasetv3'  #@param {type:'string'}\n","\n","# Step 4: Load the datasets\n","try:\n","    dataset1 = load_from_disk(dataset1_path)\n","    dataset2 = load_from_disk(dataset2_path)\n","    print(\"Datasets loaded successfully!\")\n","except Exception as e:\n","    print(f\"Error loading datasets: {e}\")\n","    raise\n","\n","# Step 5: Verify the datasets\n","print(\"Dataset 1:\", dataset1)\n","print(\"Dataset 2:\", dataset2)\n","\n","# Step 6: Merge the datasets\n","try:\n","    dataset = concatenate_datasets([dataset1, dataset2])\n","    print(\"Datasets merged successfully!\")\n","except Exception as e:\n","    print(f\"Error merging datasets: {e}\")\n","    raise\n","\n","# Step 7: Verify the merged dataset\n","print(\"Merged Dataset:\", dataset)\n","\n","# Drop references to the source datasets now that they are merged\n","dataset1 = None\n","dataset2 = None\n","\n","# Step 8: Save the merged dataset to Google Drive\n","try:\n","    dataset.save_to_disk(merged_dataset_path)\n","    print(f\"Merged dataset saved successfully to {merged_dataset_path}\")\n","except Exception as e:\n","    print(f\"Error saving merged dataset: {e}\")\n","    raise\n","\n","# Step 9: Optional - Verify the saved dataset by loading it back\n","try:\n","    dataset = load_from_disk(merged_dataset_path)\n","    print(\"Saved merged dataset loaded successfully for verification:\")\n","    print(dataset)\n","except Exception as e:\n","    print(f\"Error loading saved merged dataset: {e}\")\n","    raise"],"metadata":{"id":"HF_cmJu1EMJV"},"execution_count":null,"outputs":[]}]}