eliphatfs committed
Commit f981cf0 · Parent: f0ae1c5

Fixes.

Files changed:
- app.py +1 -0
- openshape/caption.py +1 -1
- openshape/classification.py +1 -1
app.py CHANGED
@@ -58,6 +58,7 @@ def load_data():
 
 
 def render_pc(ncols, col, pc):
+    pc = pc[:2048]
     cols = st.columns(ncols)
     with cols[col]:
         rgb = (pc[:, 3:] * 255).astype(numpy.uint8)
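The one-line change truncates the incoming point cloud to its first 2048 points before rendering, presumably to keep the Streamlit plot responsive. A minimal sketch of the slicing behavior, assuming the (N, 6) x/y/z/r/g/b column layout that render_pc's rgb line implies:

    import numpy

    # Hypothetical oversized cloud; columns x, y, z, r, g, b as in render_pc.
    pc = numpy.random.rand(100000, 6).astype(numpy.float32)
    pc = pc[:2048]  # the committed fix: keep at most the first 2048 rows
    rgb = (pc[:, 3:] * 255).astype(numpy.uint8)
    print(pc.shape, rgb.shape)  # (2048, 6) (2048, 3)

Slicing never fails on short inputs: a cloud with fewer than 2048 points passes through unchanged.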
openshape/caption.py CHANGED
@@ -149,7 +149,7 @@ def pc_caption(pc_encoder: torch.nn.Module, pc, cond_scale):
     prefix = pc_encoder(torch.tensor(pc.T[None], device=ref_dev))
     prefix = prefix.float() * cond_scale
     prefix_embed = model.clip_project(prefix).reshape(1, prefix_length, -1)
-    text
+    text = generate2(model, tokenizer, embed=prefix_embed)
     return text
 
 
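The removed line was a bare `text` expression rather than an assignment, so the name was never bound and the function raised a NameError at runtime, a plausible source of the Space's runtime error. The fix restores the call to generate2, the repo's caption decoder. A stripped-down illustration of the failure mode, with no model code involved:

    def broken():
        text          # bare name: NameError, 'text' was never assigned
        return text

    def fixed():
        text = "a caption"  # stand-in for generate2(model, tokenizer, embed=prefix_embed)
        return text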
openshape/classification.py CHANGED
@@ -7,7 +7,7 @@ from . import lvis
 @torch.no_grad()
 def pred_lvis_sims(pc_encoder: torch.nn.Module, pc):
     ref_dev = next(pc_encoder.parameters()).device
-    enc = pc_encoder(torch.tensor(pc[:, [0, 2, 1]].T[None], device=ref_dev)).cpu()
+    enc = pc_encoder(torch.tensor(pc[:, [0, 2, 1, 3, 4, 5]].T[None], device=ref_dev)).cpu()
     sim = torch.matmul(F.normalize(lvis.feats, dim=-1), F.normalize(enc, dim=-1).squeeze())
     argsort = torch.argsort(sim, descending=True)
     return OrderedDict((lvis.categories[i], sim[i]) for i in argsort)
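The old fancy index [0, 2, 1] swapped the Y and Z axes but also dropped columns 3-5, so the encoder received points stripped of their colors; the new index keeps the axis swap and passes r, g, b through. A small sketch of the reindexing, again assuming the (N, 6) layout:

    import numpy

    pc = numpy.arange(12, dtype=numpy.float32).reshape(2, 6)  # rows: x y z r g b
    old = pc[:, [0, 2, 1]]           # shape (2, 3): colors discarded
    new = pc[:, [0, 2, 1, 3, 4, 5]]  # shape (2, 6): y/z swapped, rgb kept
    print(new[0])  # [0. 2. 1. 3. 4. 5.]

The subsequent .T[None] then yields a (1, 6, N) tensor, matching the 6-channel input that pc_encoder evidently expects.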