sonobit committed on
Commit 30c1c19 · 1 parent: a69ccaa

Update app.py

Files changed (1):
  app.py  +18 -15
app.py CHANGED
@@ -1,20 +1,23 @@
 import gradio as gr
 import torch
-from transformers import AutoModel, AutoTokenizer
+import requests
+from torchvision import transforms

-# Load the model and tokenizer
-model = AutoModel.from_pretrained("detectron2/faster_rcnn_R_50_FPN_3x")
-tokenizer = AutoTokenizer.from_pretrained("detectron2/faster_rcnn_R_50_FPN_3x")
+model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
+response = requests.get("https://git.io/JJkYN")
+labels = response.text.split("\n")

-# Define a function that takes an image as input and returns the output of the model
-def detect_faces(image):
-    inputs = {"image": image}
-    output = model(**inputs)
-    boxes = output[0]["boxes"]
-    return boxes
+def predict(inp):
+    inp = transforms.ToTensor()(inp).unsqueeze(0)
+    with torch.no_grad():
+        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
+    confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+    return confidences

-# Create the Gradio interface
-interface = gr.Interface(fn=detect_faces, inputs=gr.inputs.Image(), outputs=gr.outputs.Boxes())
-
-# Launch the interface
-interface.launch()
+demo = gr.Interface(fn=predict,
+                    inputs=gr.inputs.Image(type="pil"),
+                    outputs=gr.outputs.Label(num_top_classes=3),
+                    examples=[["cheetah.jpg"]],
+                    )
+
+demo.launch()
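
For context, a minimal sketch of exercising the new predict() path outside the Gradio UI, assuming the definitions from the updated app.py are already in scope and that cheetah.jpg (the example image named in the diff) is available locally; any RGB image would do.

    from PIL import Image

    # Load an RGB test image; "cheetah.jpg" matches the example listed in the Interface.
    img = Image.open("cheetah.jpg").convert("RGB")

    # predict() returns a dict mapping each of the 1000 ImageNet labels to a softmax probability.
    confidences = predict(img)

    # Print the three most likely classes, mirroring num_top_classes=3 in the UI.
    for label, prob in sorted(confidences.items(), key=lambda kv: kv[1], reverse=True)[:3]:
        print(f"{label}: {prob:.3f}")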