aliabd (HF Staff) committed
Commit 1ab074d · verified · 1 Parent(s): 3ffd4b6

Upload folder using huggingface_hub

Files changed (3)
  1. requirements.txt +2 -2
  2. run.ipynb +1 -1
  3. run.py +0 -2
requirements.txt CHANGED
@@ -1,5 +1,5 @@
- gradio-client @ git+https://github.com/gradio-app/gradio@de997e67c9a7feb9e2eccebf92969366dbd67eba#subdirectory=client/python
- https://gradio-builds.s3.amazonaws.com/de997e67c9a7feb9e2eccebf92969366dbd67eba/gradio-4.39.0-py3-none-any.whl
+ gradio-client @ git+https://github.com/gradio-app/gradio@9b42ba8f1006c05d60a62450d3036ce0d6784f86#subdirectory=client/python
+ https://gradio-builds.s3.amazonaws.com/9b42ba8f1006c05d60a62450d3036ce0d6784f86/gradio-4.39.0-py3-none-any.whl
  pillow
  torch
  torchvision
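The only substantive change in this file is bumping both pins from dev build de997e67… to dev build 9b42ba8f… of gradio 4.39.0. As a minimal sketch (not part of the commit, assuming you want to reproduce the environment from a script rather than with `pip install -r requirements.txt`), the same pins can be installed programmatically:

# Minimal sketch (not part of the commit): install the exact dev builds
# pinned above, equivalent to `pip install -r requirements.txt`.
import subprocess
import sys

PINS = [
    "gradio-client @ git+https://github.com/gradio-app/gradio@9b42ba8f1006c05d60a62450d3036ce0d6784f86#subdirectory=client/python",
    "https://gradio-builds.s3.amazonaws.com/9b42ba8f1006c05d60a62450d3036ce0d6784f86/gradio-4.39.0-py3-none-any.whl",
    "pillow",
    "torch",
    "torchvision",
]

# Run pip inside the current interpreter's environment.
subprocess.check_call([sys.executable, "-m", "pip", "install", *PINS])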
run.ipynb CHANGED
@@ -1 +1 @@
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier_2"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pillow torch torchvision"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier_2/files/imagenet_labels.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import requests\n", "import torch\n", "from PIL import Image\n", "from torchvision import transforms\n", "\n", "import gradio as gr\n", "\n", "model = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "\n", "def predict(inp):\n", " inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "\n", "inputs = gr.Image()\n", "outputs = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(fn=predict, inputs=inputs, outputs=outputs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier_2"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pillow torch torchvision"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier_2/files/imagenet_labels.json"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import requests\n", "import torch\n", "from PIL import Image\n", "from torchvision import transforms\n", "\n", "import gradio as gr\n", "\n", "model = torch.hub.load(\"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True).eval()\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def predict(inp):\n", " inp = Image.fromarray(inp.astype(\"uint8\"), \"RGB\")\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "inputs = gr.Image()\n", "outputs = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(fn=predict, inputs=inputs, outputs=outputs)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
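The notebook edit mirrors the run.py change below: two blank lines are dropped from the source of the final code cell (before `def predict(inp):` and before `inputs = gr.Image()`); nothing else in the JSON changes. A minimal sketch (hypothetical, not part of the demo) for pulling that cell out of run.ipynb to inspect or diff it:

# Minimal sketch (not part of the demo): print the demo source embedded in
# run.ipynb so it can be compared against run.py.
import json

with open("run.ipynb") as f:
    nb = json.load(f)

# The final code cell holds the Gradio demo; its "source" field is a list
# of line strings.
print("".join(nb["cells"][-1]["source"]))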
run.py CHANGED
@@ -11,7 +11,6 @@ model = torch.hub.load("pytorch/vision:v0.6.0", "resnet18", pretrained=True).eval()
  response = requests.get("https://git.io/JJkYN")
  labels = response.text.split("\n")

-
  def predict(inp):
      inp = Image.fromarray(inp.astype("uint8"), "RGB")
      inp = transforms.ToTensor()(inp).unsqueeze(0)
@@ -19,7 +18,6 @@ def predict(inp):
          prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
      return {labels[i]: float(prediction[i]) for i in range(1000)}

-
  inputs = gr.Image()
  outputs = gr.Label(num_top_classes=3)

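Both hunks are whitespace-only: the double blank lines before `def predict(inp):` and before `inputs = gr.Image()` become single blank lines. For reference, this is the demo those hunks touch as it reads after the commit (reconstructed from the run.ipynb source above): `predict` returns a {label: probability} dict over all 1000 ImageNet classes, and `gr.Label(num_top_classes=3)` displays only the top three.

# run.py after this commit (reconstructed from the run.ipynb source above).
import requests
import torch
from PIL import Image
from torchvision import transforms

import gradio as gr

# Pretrained ResNet-18 in eval mode.
model = torch.hub.load("pytorch/vision:v0.6.0", "resnet18", pretrained=True).eval()

# Download human-readable labels for ImageNet.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")

def predict(inp):
    # gr.Image hands predict a numpy array; convert it to a [0, 1] tensor batch.
    inp = Image.fromarray(inp.astype("uint8"), "RGB")
    inp = transforms.ToTensor()(inp).unsqueeze(0)
    with torch.no_grad():
        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
    # Map every ImageNet class name to its probability for gr.Label.
    return {labels[i]: float(prediction[i]) for i in range(1000)}

inputs = gr.Image()
outputs = gr.Label(num_top_classes=3)

demo = gr.Interface(fn=predict, inputs=inputs, outputs=outputs)

if __name__ == "__main__":
    demo.launch()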