freddyaboulton (HF Staff) committed
Commit cf21e4d · verified · 1 Parent(s): 7492b6e

Upload folder using huggingface_hub

Files changed (3)
  1. README.md +1 -1
  2. requirements.txt +3 -2
  3. run.ipynb +1 -1
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
  colorFrom: indigo
  colorTo: indigo
  sdk: gradio
- sdk_version: 4.44.1
+ sdk_version: 5.0.0
  app_file: run.py
  pinned: false
  hf_oauth: true
requirements.txt CHANGED
@@ -1,2 +1,3 @@
- gradio-client @ git+https://github.com/gradio-app/gradio@a15381b23d3f6b59180e83a94a5279feccbf79a2#subdirectory=client/python
- https://gradio-pypi-previews.s3.amazonaws.com/a15381b23d3f6b59180e83a94a5279feccbf79a2/gradio-4.44.1-py3-none-any.whl
+ gradio-client @ git+https://github.com/gradio-app/gradio@bbf9ba7e997022960c621f72baa891185bd03732#subdirectory=client/python
+ https://gradio-pypi-previews.s3.amazonaws.com/bbf9ba7e997022960c621f72baa891185bd03732/gradio-5.0.0-py3-none-any.whl
+ numpy
run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " tolerance = gr.Slider(label=\"Tolerance\", info=\"How different colors can be in a segment.\", minimum=0, maximum=256*3, value=50)\n", " with gr.Row():\n", " input_img = gr.Image(label=\"Input\")\n", " output_img = gr.Image(label=\"Selected Segment\")\n", "\n", " def get_select_coords(img, tolerance, evt: gr.SelectData):\n", " visited_pixels = set()\n", " pixels_in_queue = set()\n", " pixels_in_segment = set()\n", " start_pixel = img[evt.index[1], evt.index[0]]\n", " pixels_in_queue.add((evt.index[1], evt.index[0]))\n", " while len(pixels_in_queue) > 0:\n", " pixel = pixels_in_queue.pop()\n", " visited_pixels.add(pixel)\n", " neighbors = []\n", " if pixel[0] > 0:\n", " neighbors.append((pixel[0] - 1, pixel[1]))\n", " if pixel[0] < img.shape[0] - 1:\n", " neighbors.append((pixel[0] + 1, pixel[1]))\n", " if pixel[1] > 0:\n", " neighbors.append((pixel[0], pixel[1] - 1))\n", " if pixel[1] < img.shape[1] - 1:\n", " neighbors.append((pixel[0], pixel[1] + 1))\n", " for neighbor in neighbors:\n", " if neighbor in visited_pixels:\n", " continue\n", " neighbor_pixel = img[neighbor[0], neighbor[1]]\n", " if np.abs(neighbor_pixel - start_pixel).sum() < tolerance:\n", " pixels_in_queue.add(neighbor)\n", " pixels_in_segment.add(neighbor)\n", "\n", " out = img.copy() * 0.2\n", " out = out.astype(np.uint8)\n", " for pixel in pixels_in_segment:\n", " out[pixel[0], pixel[1]] = img[pixel[0], pixel[1]]\n", " return out\n", "\n", " input_img.select(get_select_coords, [input_img, tolerance], output_img)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " tolerance = gr.Slider(label=\"Tolerance\", info=\"How different colors can be in a segment.\", minimum=0, maximum=256*3, value=50)\n", " with gr.Row():\n", " input_img = gr.Image(label=\"Input\")\n", " output_img = gr.Image(label=\"Selected Segment\")\n", "\n", " def get_select_coords(img, tolerance, evt: gr.SelectData):\n", " visited_pixels = set()\n", " pixels_in_queue = set()\n", " pixels_in_segment = set()\n", " start_pixel = img[evt.index[1], evt.index[0]]\n", " pixels_in_queue.add((evt.index[1], evt.index[0]))\n", " while len(pixels_in_queue) > 0:\n", " pixel = pixels_in_queue.pop()\n", " visited_pixels.add(pixel)\n", " neighbors = []\n", " if pixel[0] > 0:\n", " neighbors.append((pixel[0] - 1, pixel[1]))\n", " if pixel[0] < img.shape[0] - 1:\n", " neighbors.append((pixel[0] + 1, pixel[1]))\n", " if pixel[1] > 0:\n", " neighbors.append((pixel[0], pixel[1] - 1))\n", " if pixel[1] < img.shape[1] - 1:\n", " neighbors.append((pixel[0], pixel[1] + 1))\n", " for neighbor in neighbors:\n", " if neighbor in visited_pixels:\n", " continue\n", " neighbor_pixel = img[neighbor[0], neighbor[1]]\n", " if np.abs(neighbor_pixel - start_pixel).sum() < tolerance:\n", " pixels_in_queue.add(neighbor)\n", " pixels_in_segment.add(neighbor)\n", "\n", " out = img.copy() * 0.2\n", " out = out.astype(np.uint8)\n", " for pixel in pixels_in_segment:\n", " out[pixel[0], pixel[1]] = img[pixel[0], pixel[1]]\n", " return out\n", "\n", " input_img.select(get_select_coords, [input_img, tolerance], output_img)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}