Commit 1b9b794: create app.py
Parent(s): cf5954f
app.py
ADDED
@@ -0,0 +1,150 @@
# Installing necessary packages

!pip install transformers
!pip install torch
!pip install opencv-python
!pip install openai
!pip install sentencepiece
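# Editor's note (assumption, not in the original commit): the ChatCompletion
# call used further down belongs to the legacy OpenAI Python API, which was
# removed in openai>=1.0; if the unpinned install above pulls a newer release,
# pinning is one likely fix (see also the client-API sketch further on).
!pip install "openai<1"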
# Importing necessary packages and classes

from transformers import AutoImageProcessor, AutoModelForImageClassification
from transformers import BarkModel, BarkProcessor
from IPython.display import Audio, Image, Javascript, display
from google.colab.output import eval_js
from base64 import b64decode
import cv2
import openai
import os
import pandas as pd
import time
# Defining the webcam capture helper (uses Colab's JavaScript bridge)

def take_photo(filename='photo.jpg', quality=0.8):
    js = Javascript('''
      async function takePhoto(quality) {
        const div = document.createElement('div');
        const capture = document.createElement('button');
        capture.textContent = 'Capture';
        div.appendChild(capture);

        const video = document.createElement('video');
        video.style.display = 'block';
        const stream = await navigator.mediaDevices.getUserMedia({video: true});

        document.body.appendChild(div);
        div.appendChild(video);
        video.srcObject = stream;
        await video.play();

        // Resize the output to fit the video element.
        google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);

        // Wait for Capture to be clicked.
        await new Promise((resolve) => capture.onclick = resolve);

        const canvas = document.createElement('canvas');
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
        canvas.getContext('2d').drawImage(video, 0, 0);
        stream.getVideoTracks()[0].stop();
        div.remove();
        return canvas.toDataURL('image/jpeg', quality);
      }
    ''')
    display(js)
    data = eval_js('takePhoto({})'.format(quality))
    binary = b64decode(data.split(',')[1])
    with open(filename, 'wb') as f:
        f.write(binary)
    return filename
# Capturing a snapshot with the Capture button and saving it

try:
    filename = take_photo()
    print('Saved to {}'.format(filename))

    # Show the image which was just taken.
    display(Image(filename))
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do
    # not grant the page permission to access it.
    print(str(err))
# Using the pre-trained dog breed identification model

image_processor = AutoImageProcessor.from_pretrained("wesleyacheng/dog-breeds-multiclass-image-classification-with-vit")
dog_breed_model = AutoModelForImageClassification.from_pretrained("wesleyacheng/dog-breeds-multiclass-image-classification-with-vit")

# Loading the saved image (a sample file here; point img_path at the captured
# `filename` from above to classify the webcam photo instead)

img_path = '/content/n02088094_60.jpg'

image = cv2.imread(img_path)
# cv2.imread returns BGR, while the ViT processor expects RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Preprocessing the captured image with the model's own preprocessor

inputs = image_processor(images=image, return_tensors="pt")
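# Editor's note (assumption): the processor resizes and normalizes the frame;
# for a ViT checkpoint the resulting tensor is typically (1, 3, 224, 224):
print(inputs["pixel_values"].shape)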
# Predicting the breed with the Hugging Face model

outputs = dog_breed_model(**inputs)
logits = outputs.logits

# Finding the predicted class index and its corresponding label

predicted_class_idx = logits.argmax(-1).item()

predicted_class_actual = dog_breed_model.config.id2label[predicted_class_idx]
str1 = " ".join(predicted_class_actual.split("_"))

print("Predicted class:", str1)
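# Editor's sketch (assumption, not part of the original commit): the logits
# can be turned into probabilities to show how confident the model is about
# its top candidate breeds:
import torch

probs = torch.softmax(logits, dim=-1)[0]
top = torch.topk(probs, k=3)
for p, idx in zip(top.values.tolist(), top.indices.tolist()):
    print(f"{dog_breed_model.config.id2label[idx]}: {p:.2%}")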
# Specifying the OpenAI API key (read from the environment rather than
# hard-coding a secret in the source)

openai.api_key = os.environ["OPENAI_API_KEY"]

# Defining a helper around the ChatGPT chat-completion endpoint

def get_completion(prompt, model="gpt-3.5-turbo"):
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,
    )
    return response.choices[0].message["content"]
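# Editor's sketch (assumption): on openai>=1.0 the call above no longer
# exists; the rough equivalent with the current client API would be:
#
#   from openai import OpenAI
#   client = OpenAI()  # reads OPENAI_API_KEY from the environment
#
#   def get_completion(prompt, model="gpt-3.5-turbo"):
#       response = client.chat.completions.create(
#           model=model,
#           messages=[{"role": "user", "content": prompt}],
#           temperature=0,
#       )
#       return response.choices[0].message.content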
# Getting a short description from the ChatGPT API

prompt = "characteristics and behaviour of " + str1 + " in a paragraph"

response = get_completion(prompt)

print(response)
# Importing an English text-to-speech model from Hugging Face

tts_model = BarkModel.from_pretrained("suno/bark-small")
tts_processor = BarkProcessor.from_pretrained("suno/bark-small")

# Preprocessing the text with the model's processor and generating speech

inputs = tts_processor(response, voice_preset="v2/en_speaker_3")

speech_output = tts_model.generate(**inputs).cpu().numpy()

# Playing the generated speech

sampling_rate = tts_model.generation_config.sample_rate
Audio(speech_output[0], rate=sampling_rate)
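# Editor's sketch (assumption, not part of the original commit): to keep the
# narration as a file instead of only playing it inline, the array can be
# written out as a WAV with scipy (the filename is illustrative):
from scipy.io.wavfile import write as write_wav

write_wav("dog_breed_description.wav", sampling_rate, speech_output[0])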