Files changed (listing limited to 50 files because this commit contains too many changes):
- .gitattributes +2 -0
- Sarabun-Regular.ttf +0 -0
- __init__.py +1 -0
- __pycache__ copy/__init__.cpython-312.pyc +0 -0
- app.py +111 -0
- image_url.csv +0 -0
- requirements.txt +14 -0
- setup.py +18 -0
- visual_bge.egg-info/PKG-INFO +19 -0
- visual_bge.egg-info/SOURCES.txt +7 -0
- visual_bge.egg-info/dependency_links.txt +1 -0
- visual_bge.egg-info/requires.txt +4 -0
- visual_bge.egg-info/top_level.txt +1 -0
- visual_bge/__pycache__/modeling.cpython-310.pyc +0 -0
- visual_bge/__pycache__/modeling.cpython-312.pyc +0 -0
- visual_bge/__pycache__/modeling.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__init__.py +11 -0
- visual_bge/eva_clip/__pycache__/__init__.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/__init__.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/__init__.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/constants.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/constants.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/constants.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/eva_vit_model.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/eva_vit_model.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/eva_vit_model.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/factory.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/factory.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/factory.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/hf_configs.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/hf_configs.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/hf_configs.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/hf_model.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/hf_model.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/hf_model.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/loss.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/loss.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/loss.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/model.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/model.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/model.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/modified_resnet.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/modified_resnet.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/modified_resnet.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/openai.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/openai.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/openai.cpython-39.pyc +0 -0
- visual_bge/eva_clip/__pycache__/pretrained.cpython-310.pyc +0 -0
- visual_bge/eva_clip/__pycache__/pretrained.cpython-312.pyc +0 -0
- visual_bge/eva_clip/__pycache__/pretrained.cpython-39.pyc +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Visualized_m3.pth filter=lfs diff=lfs merge=lfs -text
+image filter=lfs diff=lfs merge=lfs -text

Sarabun-Regular.ttf: ADDED, binary file (90.2 kB)

__init__.py
ADDED
@@ -0,0 +1 @@
from .modeling import Visualized_BGE

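Note: the package root re-exports Visualized_BGE, so downstream code can import it directly. A minimal usage sketch follows; the local weight filename and the sample query are illustrative assumptions (app.py below downloads the same weight file from the BAAI/bge-visualized repo):

# Sketch: encode a text query into a dense embedding with Visualized_BGE.
# "Visualized_m3.pth" is assumed to have been downloaded already (see app.py).
import torch
from visual_bge.modeling import Visualized_BGE

model = Visualized_BGE(model_name_bge="BAAI/bge-m3", model_weight="Visualized_m3.pth")
model.eval()
with torch.no_grad():
    emb = model.encode(text="a red bicycle")  # dense embedding (1024-dim for bge-m3)
print(emb.shape)
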
__pycache__ copy/__init__.cpython-312.pyc: ADDED, binary file (227 Bytes)

app.py
ADDED
@@ -0,0 +1,111 @@
import gradio as gr
import torch
from visual_bge.modeling import Visualized_BGE
from qdrant_client import QdrantClient
from qdrant_client.http.models import Filter, FieldCondition, MatchValue
from PIL import Image
from io import BytesIO
import requests
import matplotlib.pyplot as plt
from matplotlib import font_manager
import textwrap
import os
import tempfile
import pandas as pd
from huggingface_hub import hf_hub_download

model_weight = hf_hub_download(repo_id="BAAI/bge-visualized", filename="Visualized_m3.pth")

data = pd.read_csv("image_url.csv")

# Load Thai font
thai_font = font_manager.FontProperties(fname='./Sarabun-Regular.ttf')

# Load model
model = Visualized_BGE(
    model_name_bge="BAAI/bge-m3",
    model_weight=model_weight
)
model.eval()

# Load Qdrant connection
qdrant_client = QdrantClient(
    url=os.environ.get("QDRANT_URL"),
    api_key=os.environ.get("QDRANT_API_KEY")
)

# Visual helper function
def visualize_results(results):
    cols = 4
    rows = (len(results) + cols - 1) // cols
    fig, axs = plt.subplots(rows, cols, figsize=(cols * 3, rows * 3))
    axs = axs.flatten() if hasattr(axs, 'flatten') else [axs]

    for i, res in enumerate(results):
        try:
            image_url = data.iloc[int(res.payload["index"]), 0]  # first CSV column is assumed to hold the image path/URL
            img = Image.open(image_url) if os.path.exists(image_url) else Image.open(BytesIO(requests.get(image_url).content))
            name = res.payload['name']
            if len(name) > 30:
                name = name[:27] + "..."
            wrapped_name = textwrap.fill(name, width=15)
            axs[i].imshow(img)
            axs[i].set_title(f"{wrapped_name}\nScore: {res.score:.2f}", fontproperties=thai_font, fontsize=10)
            axs[i].axis('off')
        except Exception as e:
            axs[i].text(0.5, 0.5, f'Error: {str(e)}', ha='center', va='center', fontsize=8)
            axs[i].axis('off')

    for j in range(len(results), len(axs)):
        axs[j].axis('off')

    plt.tight_layout(pad=3.0)
    plt.subplots_adjust(hspace=0.5)
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
        fig.savefig(tmpfile.name)
        return tmpfile.name

# Text Query Handler
def search_by_text(text_input):
    if not text_input.strip():
        return "Please provide a text input.", None
    query_vector = model.encode(text=text_input)[0].tolist()
    results = qdrant_client.query_points(
        collection_name="bge_visualized_m3",
        query=query_vector,
        with_payload=True,
    ).points
    image_path = visualize_results(results)
    return f"Results for: {text_input}", image_path

# Image Query Handler
def search_by_image(image_input):
    if image_input is None:
        return "Please upload an image.", None
    query_vector = model.encode(image=image_input)[0].tolist()
    results = qdrant_client.query_points(
        collection_name="bge_visualized_m3",
        query=query_vector,
        with_payload=True,
    ).points
    image_path = visualize_results(results)
    return "Results for image query", image_path

# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# 🔍 Visualized BGE: Multimodal Search with Qdrant")
    with gr.Tab("📝 Text Query"):
        text_input = gr.Textbox(label="Enter text to search")
        text_output = gr.Textbox(label="Query Info")
        text_image = gr.Image(label="Results", type="filepath")
        text_btn = gr.Button("Search")
        text_btn.click(fn=search_by_text, inputs=text_input, outputs=[text_output, text_image])

    with gr.Tab("🖼️ Image Query"):
        image_input = gr.Image(label="Upload image to search", type="pil")
        image_output = gr.Textbox(label="Query Info")
        image_result = gr.Image(label="Results", type="filepath")
        image_btn = gr.Button("Search")
        image_btn.click(fn=search_by_image, inputs=image_input, outputs=[image_output, image_result])

demo.launch()

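Note: app.py assumes a Qdrant collection named bge_visualized_m3 that is already populated with one point per row of image_url.csv, carrying the payload fields "index" and "name" that visualize_results() reads. The indexing job is not part of this Space; the following is a minimal sketch, under stated assumptions, of how a compatible collection could be built. The endpoint, the "name" values, the 1024-dim vector size (bge-m3's dense dimension), and reading local image paths from the CSV's first column are all illustrative:

# Sketch: build a Qdrant collection compatible with app.py's queries.
# Assumptions: bge-m3 dense vectors are 1024-dim; payload mirrors what
# visualize_results() expects ("index" into image_url.csv, a display "name").
import pandas as pd
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams, PointStruct
from visual_bge.modeling import Visualized_BGE

model = Visualized_BGE(model_name_bge="BAAI/bge-m3", model_weight="Visualized_m3.pth")
model.eval()

client = QdrantClient(url="https://example.qdrant.cloud", api_key="...")  # hypothetical endpoint
client.create_collection(
    collection_name="bge_visualized_m3",
    vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
)

data = pd.read_csv("image_url.csv")
points = []
for i, path in enumerate(data.iloc[:, 0]):
    vec = model.encode(image=path)[0].tolist()  # encode each image (local path assumed)
    points.append(PointStruct(id=i, vector=vec, payload={"index": i, "name": str(path)}))
client.upsert(collection_name="bge_visualized_m3", points=points)

Because Visualized BGE embeds text and images into the same space, points indexed this way can be retrieved by either of app.py's query handlers.
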
image_url.csv
ADDED
The diff for this file is too large to render.

requirements.txt
ADDED
@@ -0,0 +1,14 @@
torchvision==0.22.0
timm
einops
ftfy
transformers==4.52.3
qdrant_client
torch==2.7.0
torchaudio==2.7.0
gradio
requests
pillow
pandas
matplotlib
huggingface_hub

setup.py
ADDED
@@ -0,0 +1,18 @@
from setuptools import setup, find_packages

setup(
    name="visual_bge",
    version="0.1.0",
    description='visual_bge',
    long_description=open("README.md", encoding="utf-8").read(),  # file contents, not the literal path
    long_description_content_type="text/markdown",
    url='https://github.com/FlagOpen/FlagEmbedding/tree/master/research/visual_bge',
    packages=find_packages(),
    install_requires=[
        'torchvision',
        'timm',
        'einops',
        'ftfy'
    ],
    python_requires='>=3.6',
)

visual_bge.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,19 @@
Metadata-Version: 2.4
Name: visual_bge
Version: 0.1.0
Summary: visual_bge
Home-page: https://github.com/FlagOpen/FlagEmbedding/tree/master/research/visual_bge
Requires-Python: >=3.6
Description-Content-Type: text/markdown
Requires-Dist: torchvision
Requires-Dist: timm
Requires-Dist: einops
Requires-Dist: ftfy
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

./README.md

visual_bge.egg-info/SOURCES.txt
ADDED
@@ -0,0 +1,7 @@
README.md
setup.py
visual_bge.egg-info/PKG-INFO
visual_bge.egg-info/SOURCES.txt
visual_bge.egg-info/dependency_links.txt
visual_bge.egg-info/requires.txt
visual_bge.egg-info/top_level.txt

visual_bge.egg-info/dependency_links.txt
ADDED
@@ -0,0 +1 @@
(single blank line)

visual_bge.egg-info/requires.txt
ADDED
@@ -0,0 +1,4 @@
torchvision
timm
einops
ftfy

visual_bge.egg-info/top_level.txt
ADDED
@@ -0,0 +1 @@
visual_bge

visual_bge/__pycache__/modeling.cpython-310.pyc: ADDED, binary file (10.1 kB)
visual_bge/__pycache__/modeling.cpython-312.pyc: ADDED, binary file (17.1 kB)
visual_bge/__pycache__/modeling.cpython-39.pyc: ADDED, binary file (10 kB)

visual_bge/eva_clip/__init__.py
ADDED
@@ -0,0 +1,11 @@
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_eva_vision_and_transforms
from .factory import list_models, add_model_config, get_model_config, load_checkpoint
from .loss import ClipLoss
from .model import CLIP, CustomCLIP, CLIPTextCfg, CLIPVisionCfg, \
    convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
from .openai import load_openai_model, list_openai_models
from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model, \
    get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
from .tokenizer import SimpleTokenizer, tokenize
from .transform import image_transform

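Note: this vendored eva_clip package re-exports an open_clip-style factory API. A sketch of typical use follows; the model config name and checkpoint path are illustrative assumptions, since this Space loads EVA weights indirectly through Visualized_BGE rather than calling the factory itself:

# Sketch: open_clip-style factory usage for the vendored eva_clip package.
# The config name and checkpoint path below are illustrative assumptions.
from visual_bge.eva_clip import create_model_and_transforms, get_tokenizer

model, _, preprocess_val = create_model_and_transforms(
    "EVA02-CLIP-B-16",               # illustrative model config name
    pretrained="/path/to/eva.ckpt",  # illustrative checkpoint path
)
tokenizer = get_tokenizer("EVA02-CLIP-B-16")
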
visual_bge/eva_clip/__pycache__/__init__.cpython-310.pyc: ADDED, binary file (1.32 kB)
visual_bge/eva_clip/__pycache__/__init__.cpython-312.pyc: ADDED, binary file (1.36 kB)
visual_bge/eva_clip/__pycache__/__init__.cpython-39.pyc: ADDED, binary file (1.31 kB)
visual_bge/eva_clip/__pycache__/constants.cpython-310.pyc: ADDED, binary file (309 Bytes)
visual_bge/eva_clip/__pycache__/constants.cpython-312.pyc: ADDED, binary file (309 Bytes)
visual_bge/eva_clip/__pycache__/constants.cpython-39.pyc: ADDED, binary file (307 Bytes)
visual_bge/eva_clip/__pycache__/eva_vit_model.cpython-310.pyc: ADDED, binary file (16.1 kB)
visual_bge/eva_clip/__pycache__/eva_vit_model.cpython-312.pyc: ADDED, binary file (31.3 kB)
visual_bge/eva_clip/__pycache__/eva_vit_model.cpython-39.pyc: ADDED, binary file (16 kB)
visual_bge/eva_clip/__pycache__/factory.cpython-310.pyc: ADDED, binary file (12.1 kB)
visual_bge/eva_clip/__pycache__/factory.cpython-312.pyc: ADDED, binary file (20.2 kB)
visual_bge/eva_clip/__pycache__/factory.cpython-39.pyc: ADDED, binary file (12 kB)
visual_bge/eva_clip/__pycache__/hf_configs.cpython-310.pyc: ADDED, binary file (710 Bytes)
visual_bge/eva_clip/__pycache__/hf_configs.cpython-312.pyc: ADDED, binary file (780 Bytes)
visual_bge/eva_clip/__pycache__/hf_configs.cpython-39.pyc: ADDED, binary file (708 Bytes)
visual_bge/eva_clip/__pycache__/hf_model.cpython-310.pyc: ADDED, binary file (7.38 kB)
visual_bge/eva_clip/__pycache__/hf_model.cpython-312.pyc: ADDED, binary file (12.8 kB)
visual_bge/eva_clip/__pycache__/hf_model.cpython-39.pyc: ADDED, binary file (7.37 kB)
visual_bge/eva_clip/__pycache__/loss.cpython-310.pyc: ADDED, binary file (3.35 kB)
visual_bge/eva_clip/__pycache__/loss.cpython-312.pyc: ADDED, binary file (6.11 kB)
visual_bge/eva_clip/__pycache__/loss.cpython-39.pyc: ADDED, binary file (3.32 kB)
visual_bge/eva_clip/__pycache__/model.cpython-310.pyc: ADDED, binary file (13.5 kB)
visual_bge/eva_clip/__pycache__/model.cpython-312.pyc: ADDED, binary file (22.3 kB)
visual_bge/eva_clip/__pycache__/model.cpython-39.pyc: ADDED, binary file (13.2 kB)
visual_bge/eva_clip/__pycache__/modified_resnet.cpython-310.pyc: ADDED, binary file (6.48 kB)
visual_bge/eva_clip/__pycache__/modified_resnet.cpython-312.pyc: ADDED, binary file (12.6 kB)
visual_bge/eva_clip/__pycache__/modified_resnet.cpython-39.pyc: ADDED, binary file (6.34 kB)
visual_bge/eva_clip/__pycache__/openai.cpython-310.pyc: ADDED, binary file (4.83 kB)
visual_bge/eva_clip/__pycache__/openai.cpython-312.pyc: ADDED, binary file (7.66 kB)
visual_bge/eva_clip/__pycache__/openai.cpython-39.pyc: ADDED, binary file (4.78 kB)
visual_bge/eva_clip/__pycache__/pretrained.cpython-310.pyc: ADDED, binary file (9.14 kB)
visual_bge/eva_clip/__pycache__/pretrained.cpython-312.pyc: ADDED, binary file (13.1 kB)
visual_bge/eva_clip/__pycache__/pretrained.cpython-39.pyc: ADDED, binary file (9 kB)