codeShare committed
Commit 2c23e8e · verified · 1 Parent(s): 67d5ac5

Upload TA_image_to_dataset.ipynb

Files changed (1)
  1. TA_image_to_dataset.ipynb +1 -0
TA_image_to_dataset.ipynb ADDED
@@ -0,0 +1 @@
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"authorship_tag":"ABX9TyMeaCDE0/A8gxHJC+1SUs4o"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"}},"cells":[{"cell_type":"code","source":["#@markdown Build a dataset from ALL images in /content/ with EXIF metadata (using exiftool) as separate columns and WebM files, saving to Google Drive\n","\n","# Step 1: Install required libraries and exiftool\n","!pip install Pillow imageio[ffmpeg] datasets pandas\n","!apt-get update && apt-get install -y libimage-exiftool-perl\n","\n","# Step 2: Import required libraries\n","import os\n","import glob\n","import subprocess\n","from PIL import Image\n","import imageio.v3 as iio\n","import pandas as pd\n","from datasets import Dataset, Features, Image as HFImage, Value\n","from google.colab import drive\n","\n","# Step 3: Mount Google Drive\n","drive.mount('/content/drive')\n","output_dir = '/content/drive/My Drive/exif_dataset' #@param {type:'string'}\n","\n","# Step 4: Define function to extract metadata using exiftool\n","def get_exif_data(image_path):\n"," try:\n"," # Run exiftool to extract all metadata as JSON\n"," result = subprocess.run(\n"," ['exiftool', '-j', image_path],\n"," stdout=subprocess.PIPE,\n"," stderr=subprocess.PIPE,\n"," text=True,\n"," check=True\n"," )\n"," # Parse JSON output (exiftool -j returns a list of dictionaries)\n"," metadata = eval(result.stdout)[0] # First item in the list\n"," return metadata\n"," except subprocess.CalledProcessError as e:\n"," print(f\"exiftool error for {image_path}: {e.stderr}\")\n"," return {\"Error\": f\"exiftool failed: {str(e)}\"}\n"," except Exception as e:\n"," return {\"Error\": f\"Failed to read metadata: {str(e)}\"}\n","\n","# Step 5: Define function to convert image to WebM\n","def convert_to_webm(image_path, output_path):\n"," try:\n"," img = iio.imread(image_path)\n"," iio.imwrite(output_path, img, codec='vp8', fps=1, quality=8)\n"," return True\n"," except Exception as e:\n"," print(f\"Error converting {image_path} to WebM: {str(e)}\")\n"," return False\n","\n","# Step 6: Collect ALL images from /content/\n","image_dir = \"/content/\"\n","image_extensions = [\"*.jpg\", \"*.jpeg\", \"*.png\"]\n","image_paths = []\n","for ext in image_extensions:\n"," image_paths.extend(glob.glob(os.path.join(image_dir, ext)))\n","\n","if not image_paths:\n"," print(\"No images found in /content/\")\n","else:\n"," # Step 7: Process all images to collect metadata keys and data\n"," images = []\n"," webm_paths = []\n"," metadata_list = []\n"," all_metadata_keys = set()\n","\n"," for img_path in image_paths:\n"," print(f\"\\nProcessing {img_path}:\")\n","\n"," # Load image\n"," try:\n"," img = Image.open(img_path).convert('RGB')\n"," except Exception as e:\n"," print(f\"Error loading image {img_path}: {str(e)}\")\n"," continue\n","\n"," # Extract metadata with exiftool\n"," metadata = get_exif_data(img_path)\n"," print(\"Metadata (via exiftool):\")\n"," for key, value in metadata.items():\n"," print(f\" {key}: {value}\")\n"," all_metadata_keys.add(key) # Collect unique metadata keys\n","\n"," # Convert to WebM\n"," webm_path = os.path.splitext(img_path)[0] + \".webm\"\n"," if convert_to_webm(img_path, webm_path):\n"," print(f\" Saved WebM: {webm_path}\")\n"," images.append(img)\n"," webm_paths.append(webm_path)\n"," metadata_list.append(metadata)\n"," else:\n"," print(f\" Skipped WebM conversion for {img_path}\")\n"," continue\n","\n"," # Step 8: Check if any images were processed\n"," if not 
images:\n"," print(\"No images were successfully processed.\")\n"," else:\n"," # Step 9: Prepare dataset dictionary with separate columns for each metadata key\n"," data_dict = {'image': images, 'webm_path': webm_paths}\n","\n"," # Initialize columns for each metadata key with None\n"," for key in all_metadata_keys:\n"," data_dict[key] = [None] * len(images)\n","\n"," # Populate metadata values\n"," for i, metadata in enumerate(metadata_list):\n"," for key, value in metadata.items():\n"," data_dict[key][i] = str(value) # Convert values to strings\n","\n"," # Step 10: Define dataset features\n"," features = Features({\n"," 'image': HFImage(),\n"," 'webm_path': Value(\"string\"),\n"," **{key: Value(\"string\") for key in all_metadata_keys} # Dynamic columns for metadata keys\n"," })\n","\n"," # Step 11: Create Hugging Face Dataset\n"," dataset = Dataset.from_dict(data_dict, features=features)\n","\n"," # Step 12: Verify the dataset\n"," print(\"\\nDataset Summary:\")\n"," print(dataset)\n"," if len(dataset) > 0:\n"," print(\"\\nExample of accessing first item:\")\n"," print(\"WebM Path:\", dataset['webm_path'][0])\n"," print(\"Image type:\", type(dataset['image'][0]))\n"," print(\"Image size:\", dataset['image'][0].size)\n"," print(\"Metadata columns (first item):\")\n"," for key in all_metadata_keys:\n"," if dataset[key][0] is not None:\n"," print(f\" {key}: {dataset[key][0]}\")\n","\n"," # Step 13: Save dataset to Google Drive\n"," try:\n"," os.makedirs(output_dir, exist_ok=True)\n"," dataset.save_to_disk(output_dir)\n"," print(f\"\\nDataset saved to {output_dir}\")\n"," except Exception as e:\n"," print(f\"Error saving dataset to Google Drive: {str(e)}\")"],"metadata":{"id":"qVr4anf9KMh7"},"execution_count":null,"outputs":[]}]}