Spaces:
Build error
Build error
Sujit Pal
committed on
Commit
·
17476c1
1
Parent(s):
f58917e
fix: fixing result output and bypassing large files problem
Browse files
- dashboard_image2image.py +1 -1
- dashboard_text2image.py +1 -1
- images/test-captions.json +0 -0
- utils.py +4 -6
dashboard_image2image.py
CHANGED
|
@@ -14,7 +14,7 @@ BASELINE_MODEL = "openai/clip-vit-base-patch32"
|
|
| 14 |
MODEL_PATH = "flax-community/clip-rsicd-v2"
|
| 15 |
IMAGE_VECTOR_FILE = "./vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv"
|
| 16 |
IMAGES_DIR = "./images"
|
| 17 |
-
CAPTIONS_FILE = os.path.join(IMAGES_DIR, "
|
| 18 |
|
| 19 |
@st.cache(allow_output_mutation=True)
|
| 20 |
def load_example_images():
|
|
|
|
| 14 |
MODEL_PATH = "flax-community/clip-rsicd-v2"
|
| 15 |
IMAGE_VECTOR_FILE = "./vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv"
|
| 16 |
IMAGES_DIR = "./images"
|
| 17 |
+
CAPTIONS_FILE = os.path.join(IMAGES_DIR, "test-captions.json")
|
| 18 |
|
| 19 |
@st.cache(allow_output_mutation=True)
|
| 20 |
def load_example_images():
|
dashboard_text2image.py
CHANGED
|
@@ -13,7 +13,7 @@ BASELINE_MODEL = "openai/clip-vit-base-patch32"
|
|
| 13 |
MODEL_PATH = "flax-community/clip-rsicd-v2"
|
| 14 |
IMAGE_VECTOR_FILE = "./vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv"
|
| 15 |
IMAGES_DIR = "./images"
|
| 16 |
-
CAPTIONS_FILE = os.path.join(IMAGES_DIR, "
|
| 17 |
|
| 18 |
def app():
|
| 19 |
filenames, index = utils.load_index(IMAGE_VECTOR_FILE)
|
|
|
|
| 13 |
MODEL_PATH = "flax-community/clip-rsicd-v2"
|
| 14 |
IMAGE_VECTOR_FILE = "./vectors/test-bs128x8-lr5e-6-adam-ckpt-1.tsv"
|
| 15 |
IMAGES_DIR = "./images"
|
| 16 |
+
CAPTIONS_FILE = os.path.join(IMAGES_DIR, "test-captions.json")
|
| 17 |
|
| 18 |
def app():
|
| 19 |
filenames, index = utils.load_index(IMAGE_VECTOR_FILE)
|
images/test-captions.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
utils.py
CHANGED
|
@@ -38,11 +38,9 @@ def load_model(model_path, baseline_model):
|
|
| 38 |
def load_captions(caption_file):
|
| 39 |
image2caption = {}
|
| 40 |
with open(caption_file, "r") as fcap:
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
filename =
|
| 44 |
-
captions = []
|
| 45 |
-
for sentence in image["sentences"]:
|
| 46 |
-
captions.append(sentence["raw"])
|
| 47 |
image2caption[filename] = captions
|
| 48 |
return image2caption
|
|
|
|
| 38 |
def load_captions(caption_file):
|
| 39 |
image2caption = {}
|
| 40 |
with open(caption_file, "r") as fcap:
|
| 41 |
+
for line in fcap:
|
| 42 |
+
data = json.loads(line.strip())
|
| 43 |
+
filename = data["filename"]
|
| 44 |
+
captions = data["captions"]
|
|
|
|
|
|
|
| 45 |
image2caption[filename] = captions
|
| 46 |
return image2caption
|