andrewwwwwwww committed on
Commit
cbd48ae
·
verified ·
1 Parent(s): e7d4718

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -90
app.py CHANGED
@@ -1,96 +1,31 @@
1
- # Importing some modules
2
  import gradio as gr
3
  from transformers import pipeline
4
 
5
- # Loading in the model
6
  MODEL_AGE = pipeline('image-classification', model='nateraw/vit-age-classifier', device=-1)
7
- MODEL_EMOTION = pipeline('image-classification', model='dennisjooo/emotion_classification', device=-1)
8
 
9
- def classify_image(image, top_k):
10
- # Getting the classification result
11
  age_result = MODEL_AGE(image)
12
- emotion_result = MODEL_EMOTION(image)
13
-
14
- # Reformating the classification result into a dictionary
15
- age_result = {result['label']: result['score'] for result in age_result[:min(int(top_k), 8)]}
16
- emotion_result = {result['label']: result['score'] for result in emotion_result[:min(int(top_k), 7)]}
17
-
18
- # Add some text comment to it lol
19
- comment = text_comment(list(age_result.keys())[0])
20
-
21
- # Returning the classification result
22
- return age_result, comment, emotion_result
23
-
24
- # Snarky comment based on age
25
- def text_comment(pred_class):
26
- match pred_class:
27
- case "3-9":
28
- return "Lost your way to the playground?"
29
- case "10-19":
30
- return "But Mom, I'm not a kid anymore!"
31
- case "20-29":
32
- return "You're in your prime!"
33
- case "30-39":
34
- return "Oof, watch out for those wrinkles!"
35
- case "40-49":
36
- return "You're still young at heart!"
37
- case "50-59":
38
- return "Retirement is just around the corner!"
39
- case "60-69":
40
- return "You're a senior citizen now!"
41
- case "more than 70":
42
- return "Hey Siri, play 'My Way' by Frank Sinatra"
43
-
44
-
45
- if __name__ == "__main__":
46
- # Definining the title of the interface
47
- title_text = """
48
- # I will guess your age and mood based on your picture!
49
- ---
50
- Totally not creepy, I promise :)
51
- <br>Made by [Andrew]. A project for REA Mastering AI course.
52
- Age guessing model from [nateraw/vit-age-classifier](https://huggingface.co/nateraw/vit-age-classifier)
53
- <br>Mood-guessing model is a [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k)
54
- trained on [FastJobs/Visual_Emotional_Analysis](https://huggingface.co/datasets/FastJobs/Visual_Emotional_Analysis)
55
- """
56
-
57
- # Creating the Gradio interface
58
- with gr.Blocks() as demo:
59
- gr.Markdown(title_text)
60
- with gr.Row(equal_height=True):
61
- with gr.Column():
62
- # Creating the input block
63
- image = gr.Image(label="Upload a picture of yourself", type="pil", scale=2)
64
-
65
- # Creating the example block
66
- gr.Examples(examples=[
67
- "./images/andrew.jpg",
68
- "./images/feifei.jpg",
69
- "./images/geoff.jpg",
70
- "./images/ilya.jpg",
71
- "./images/karpathy.jpg",
72
- "./images/lex.jpg"
73
- ], inputs=[image], label="Or choose an example")
74
-
75
-
76
- with gr.Column():
77
- # Getting the top k hyperparameter
78
- top_k = gr.Number(label="How many guesses do I get?", value=1)
79
-
80
- # Creating the output block
81
- age_label = gr.Label(label="Hey it's me, your age!")
82
- comment = gr.Textbox(label="Based on your age, I think you are...",
83
- placeholder="I'm still learning, so I might be wrong!")
84
- emotion_label = gr.Label(label="Hey it's me, your emotion!")
85
-
86
- with gr.Row():
87
- # Submit button
88
- btn = gr.Button("Beep boop, guess my age and emotion!")
89
- btn.click(classify_image, inputs=[image, top_k], outputs=[age_label, comment, emotion_label])
90
-
91
- # Clear button
92
- clear = gr.Button("Poof begone!")
93
- clear.click(lambda: [None, None, None, None], inputs=[], outputs=[image, age_label, comment, emotion_label])
94
-
95
- # Launching the interface
96
- demo.launch(share=True, debug=True)
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ # Load the model
5
  MODEL_AGE = pipeline('image-classification', model='nateraw/vit-age-classifier', device=-1)
 
6
 
7
# Midpoint age for each label emitted by the age classifier. Built once at
# import time instead of on every prediction call.
AGE_MIDPOINTS = {
    "3-9": 6,
    "10-19": 15,
    "20-29": 25,
    "30-39": 35,
    "40-49": 45,
    "50-59": 55,
    "60-69": 65,
    "more than 70": 75,
}

# Fallback age when the model returns no result or an unrecognized label.
DEFAULT_AGE = 30


def predict(image):
    """Classify *image* and return a single representative age.

    Args:
        image: PIL image supplied by the Gradio ``Image`` input component.

    Returns:
        dict: ``{"data": [age]}`` where ``age`` is an int — the midpoint of
        the predicted age bracket, or ``DEFAULT_AGE`` when the model yields
        no prediction or an unknown label. A number (not the raw label
        string) is returned because downstream clients (e.g. the iOS app)
        expect a numeric value.
    """
    age_result = MODEL_AGE(image)
    # Guard against an empty prediction list instead of raising IndexError.
    if not age_result:
        return {"data": [DEFAULT_AGE]}
    # The first entry is the top-scoring prediction.
    top_age = age_result[0]['label']
    return {"data": [AGE_MIDPOINTS.get(top_age, DEFAULT_AGE)]}
24
+
25
# Minimal API-style UI: one image in, JSON payload out. Kept at module level
# so hosting platforms that import this module can serve `iface` directly.
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=gr.Json(),
)

# Launch only when executed as a script; importing the module (e.g. in tests
# or by a server that serves `iface` itself) must not start a web server as a
# side effect. The previous revision of this file used the same guard.
if __name__ == "__main__":
    iface.launch()