Correctly send tensors to device
Browse files
app.py
CHANGED
@@ -70,9 +70,9 @@ def text_to_image(image, prompt, duplications: float):
|
|
70 |
maurice_embeddings = list()
|
71 |
for batch in batched_inputs:
|
72 |
# Load on device
|
73 |
-
batch['input_ids'].to(model.device)
|
74 |
-
batch['attention_mask'].to(model.device)
|
75 |
-
batch['pixel_values'].to(model.device)
|
76 |
output = model.generate(**batch, max_new_tokens=500, temperature=0.3)
|
77 |
|
78 |
# Unload GPU
|
@@ -85,7 +85,7 @@ def text_to_image(image, prompt, duplications: float):
|
|
85 |
|
86 |
for text in generated_text:
|
87 |
text_output = text.split("ASSISTANT:")[-1]
|
88 |
-
text_embeddings = embedder.encode(text_output)
|
89 |
maurice_description.append(text_output)
|
90 |
maurice_embeddings.append(text_embeddings)
|
91 |
|
|
|
70 |
maurice_embeddings = list()
|
71 |
for batch in batched_inputs:
|
72 |
# Load on device
|
73 |
+
batch['input_ids'] = batch['input_ids'].to(model.device)
|
74 |
+
batch['attention_mask'] = batch['attention_mask'].to(model.device)
|
75 |
+
batch['pixel_values'] = batch['pixel_values'].to(model.device)
|
76 |
output = model.generate(**batch, max_new_tokens=500, temperature=0.3)
|
77 |
|
78 |
# Unload GPU
|
|
|
85 |
|
86 |
for text in generated_text:
|
87 |
text_output = text.split("ASSISTANT:")[-1]
|
88 |
+
text_embeddings = embedder.encode(text_output)
|
89 |
maurice_description.append(text_output)
|
90 |
maurice_embeddings.append(text_embeddings)
|
91 |
|