fthor committed
Commit 854f0cf · 1 Parent(s): dacd4b7

added embeddings

Files changed (1)
  1. app.py +11 -7
app.py CHANGED
@@ -3,11 +3,15 @@ import torch
 from transformers import AutoProcessor, LlavaForConditionalGeneration
 from transformers import BitsAndBytesConfig
 
+from sentence_transformers import SentenceTransformer, util
+
 quantization_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_compute_dtype=torch.float16
 )
 
+embedder = SentenceTransformer('all-mpnet-base-v2')
+
 model_id = "llava-hf/llava-1.5-7b-hf"
 
 processor = AutoProcessor.from_pretrained(model_id)
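The two added lines above pull in sentence-transformers and instantiate an 'all-mpnet-base-v2' embedder at import time, next to the quantized LLaVA model. A minimal sketch of what that embedder produces on its own, assuming sentence-transformers is installed (the example strings are made up purely for illustration; util is the helper module imported on the same line in app.py):

from sentence_transformers import SentenceTransformer, util

embedder = SentenceTransformer('all-mpnet-base-v2')  # same model as in app.py

# Made-up answers, only to show the output shape and a similarity score.
answers = [
    "A dog is sleeping on the couch.",
    "There is a dog resting on the sofa.",
]
embeddings = embedder.encode(answers)   # numpy array of shape (2, 768)
print(embeddings.shape)

# util.cos_sim returns the cosine similarity between the two embeddings.
print(util.cos_sim(embeddings[0], embeddings[1]))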
@@ -22,13 +26,13 @@ def text_to_image(image, prompt):
     prompt = f'USER: <image>\n{prompt}\nASSISTANT:'
 
     inputs = processor([prompt], images=[image], padding=True, return_tensors="pt").to(model.device)
-    for k, v in inputs.items():
-        print(k, v.shape)
-    print(inputs)
-    output = model.generate(**inputs, max_new_tokens=100)
+    output = model.generate(**inputs, max_new_tokens=500)
     generated_text = processor.batch_decode(output, skip_special_tokens=True)
-    for text in generated_text:
-        return text.split("ASSISTANT:")[-1]
+    text = generated_text.pop()
+    text_output = text.split("ASSISTANT:")[-1]
+    text_embeddings = embedder.encode(text_output)
+
+    return text_output, dict(text_embeddings=text_embeddings)
 
 
 demo = gr.Interface(
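With this change, text_to_image drops the debug prints, generates up to 500 new tokens instead of 100, keeps only the part of the decoded text after "ASSISTANT:", embeds that answer, and returns a (text, dict) pair instead of a single string. A rough sketch of calling the updated function directly, outside the Gradio UI (the image path and prompt are placeholders):

from PIL import Image

image = Image.open("example.jpg")  # placeholder path
answer, payload = text_to_image(image, "What is in this picture?")

print(answer)                            # generated text after "ASSISTANT:"
embedding = payload["text_embeddings"]   # numpy array from embedder.encode
print(embedding.shape)                   # (768,) for all-mpnet-base-v2

# Note: if gr.JSON cannot serialize the raw numpy array, embedding.tolist()
# would make the payload plain-JSON friendly.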
@@ -37,7 +41,7 @@ demo = gr.Interface(
         gr.Image(label='Select an image to analyze', type='pil'),
         gr.Textbox(label='Enter Prompt')
     ],
-    outputs=gr.Textbox(label='Maurice says:')
+    outputs=[gr.Textbox(label='Maurice says:'), gr.JSON(label='Embedded text')]
)
 
 if __name__ == "__main__":
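The outputs argument is now a list whose two components line up with the function's two return values: the Textbox receives text_output and the JSON panel receives the embeddings dict. For context, the assembled interface would look roughly like the following; the fn= argument and the launch() call are not visible in the diff and are assumed here:

import gradio as gr

demo = gr.Interface(
    fn=text_to_image,  # assumed: the function updated above
    inputs=[
        gr.Image(label='Select an image to analyze', type='pil'),
        gr.Textbox(label='Enter Prompt'),
    ],
    outputs=[
        gr.Textbox(label='Maurice says:'),  # receives text_output
        gr.JSON(label='Embedded text'),     # receives dict(text_embeddings=...)
    ],
)

if __name__ == "__main__":
    demo.launch()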
 