Commit 6640fa0
Parent(s): 998d998
Update app.py
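This commit threads an explicit torch device through the app's page-drawing functions: draw_text, draw_audio, and draw_camera each gain a device=None keyword, the CLIPDemo constructor calls forward it as device=device, and the CUDA-availability check that previously ran inside draw_audio is hoisted to the top-level script so the device is chosen once and shared by every page.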
app.py CHANGED
@@ -249,8 +249,10 @@ class CLIPDemo:
 def draw_text(
     key,
     plot=False,
+    device=None,
 ):

+
     image = Image.open("data/logo.png")
     st.image(image, use_column_width="always")

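Defaulting the new parameter to None keeps the signature backward compatible: a call that omits it, such as draw_text("text", plot=True), still works, presumably leaving CLIPDemo to fall back to a default device when it receives None.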
@@ -259,7 +261,7 @@ def draw_text(
         text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
         vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True)
         tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
-        model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer)
+        model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer, device=device)
         model.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH + "/*.jpeg")[:1000])
         st.session_state["model"] = model

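The diff does not show how CLIPDemo consumes the new keyword, so the snippet below is only a minimal sketch of a plausible __init__, assuming the class moves both encoders to the chosen device and falls back to its own CUDA check when device is None:

import torch

class CLIPDemo:
    def __init__(self, vision_encoder, text_encoder, tokenizer, device=None):
        # Assumed fallback, mirroring the line this commit removes from draw_audio.
        self.device = device if device is not None else torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        # Move both encoders to the chosen device and switch to inference mode.
        self.vision_encoder = vision_encoder.to(self.device).eval()
        self.text_encoder = text_encoder.to(self.device).eval()
        self.tokenizer = tokenizer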
@@ -302,13 +304,12 @@ def draw_text(
 def draw_audio(
     key,
     plot=False,
+    device=None,
 ):

     image = Image.open("data/logo.png")
     st.image(image, use_column_width="always")

-    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-
     if 'model' not in st.session_state:
         #with st.spinner('We are orginizing your traks...'):
         text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
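With the in-function lookup removed, draw_audio no longer decides its own device on every Streamlit rerun; it now receives the one selected by the main script (last hunk below), which keeps all three pages on the same device.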
@@ -371,6 +372,7 @@ def draw_audio(
 def draw_camera(
     key,
     plot=False,
+    device=None,
 ):

     image = Image.open("data/logo.png")
@@ -381,7 +383,7 @@ def draw_camera(
         text_encoder = AutoModel.from_pretrained(CLIP_TEXT_MODEL_PATH, local_files_only=True)
         vision_encoder = CLIPVisionModel.from_pretrained(CLIP_VISION_MODEL_PATH, local_files_only=True)
         tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL)
-        model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer)
+        model = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer, device=device)
         model.compute_image_embeddings(glob.glob(SPECTROGRAMS_PATH + "/*.jpeg")[:5000])
         st.session_state["model"] = model
         #st.session_state['model'] = CLIPDemo(vision_encoder=vision_encoder, text_encoder=text_encoder, tokenizer=tokenizer)
@@ -429,15 +431,17 @@ def draw_camera(
 selected = streamlit_menu(example=3)
 df = pd.read_csv('full_metadata.csv', index_col=False)

+device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
 if selected == "Text":
     # st.title(f"You have selected {selected}")
-    draw_text("text", plot=True)
+    draw_text("text", plot=True, device=device)
 if selected == "Audio":
     # st.title(f"You have selected {selected}")
-    draw_audio("audio", plot=True)
+    draw_audio("audio", plot=True, device=device)
 if selected == "Camera":
     # st.title(f"You have selected {selected}")
-    #draw_camera("camera", plot=True)
+    #draw_camera("camera", plot=True, device=device)
     pass

 # with st.sidebar:
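Taken together, the top-level pattern the app now follows is roughly the sketch below; load_model is a hypothetical stand-in for the from_pretrained/CLIPDemo setup inside the draw functions, not code from this diff:

import streamlit as st
import torch

def load_model(device):
    # Hypothetical stand-in for the encoder/tokenizer loading and
    # CLIPDemo construction performed inside the draw functions.
    ...

# Select the device once, at script scope, so every page shares it.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# st.session_state persists across Streamlit reruns, so this guard makes
# the expensive model setup run only once per browser session.
if "model" not in st.session_state:
    st.session_state["model"] = load_model(device)
model = st.session_state["model"]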