redo62 · mouaddb committed
Commit 2b83fb8 · 0 parent(s)

Duplicate from mouaddb/image2text-comp

Co-authored-by: D <[email protected]>
Files changed (10)
  1. .gitattributes +40 -0
  2. README.md +14 -0
  3. app.py +65 -0
  4. requirements.txt +2 -0
  5. test-1.jpeg +0 -0
  6. test-2.jpeg +0 -0
  7. test-3.jpeg +0 -0
  8. test-4.jpeg +0 -0
  9. test-5.jpeg +0 -0
  10. test-6.jpg +3 -0
.gitattributes ADDED
@@ -0,0 +1,40 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ test-1.jpg filter=lfs diff=lfs merge=lfs -text
+ test-3.jpg filter=lfs diff=lfs merge=lfs -text
+ test-4.jpg filter=lfs diff=lfs merge=lfs -text
+ test-5.jpg filter=lfs diff=lfs merge=lfs -text
+ test-6.jpg filter=lfs diff=lfs merge=lfs -text
+ test-7.jpg filter=lfs diff=lfs merge=lfs -text
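These are the stock Hugging Face Hub LFS rules for archive and model formats, plus per-image entries added for this Space's test files. Note that the image entries track test-N.jpg names while most of the committed files are named test-N.jpeg; of the images in this commit, only test-6.jpg matches a pattern here, which is why it alone is stored as an LFS pointer (the +3 -0 in the file list above) and shows Git LFS details at the bottom of the page.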
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Image2text Comp
+ emoji: 🐠
+ colorFrom: purple
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.28.2
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: mouaddb/image2text-comp
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,65 @@
+ import gradio as gr
+ from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, VisionEncoderDecoderModel
+ import torch
+
+ git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
+ git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
+
+ git_processor_large = AutoProcessor.from_pretrained("microsoft/git-large-coco")
+ git_model_large = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
+
+ blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+ blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+
+ blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+ vitgpt_processor = AutoImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+ vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+ vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ git_model_base.to(device)
+ blip_model_base.to(device)
+ git_model_large.to(device)
+ blip_model_large.to(device)
+ vitgpt_model.to(device)
+
+ def generate_caption(processor, model, image, tokenizer=None):
+     inputs = processor(images=image, return_tensors="pt").to(device)
+
+     generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
+
+     if tokenizer is not None:
+         generated_caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+     else:
+         generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+     return generated_caption
+
+
+ def generate_captions(image):
+     caption_git_base = generate_caption(git_processor_base, git_model_base, image)
+
+     caption_git_large = generate_caption(git_processor_large, git_model_large, image)
+
+     caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
+
+     caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
+
+     caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer)
+
+     return caption_git_base, caption_git_large, caption_blip_base, caption_blip_large, caption_vitgpt
+
+
+ examples = [["test-1.jpeg"], ["test-2.jpeg"], ["test-3.jpeg"], ["test-4.jpeg"], ["test-5.jpeg"], ["test-6.jpg"]]
+ outputs = [gr.outputs.Textbox(label="Caption generated by GIT-base"), gr.outputs.Textbox(label="Caption generated by GIT-large"), gr.outputs.Textbox(label="Caption generated by BLIP-base"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by ViT+GPT-2")]
+
+
+ interface = gr.Interface(fn=generate_captions,
+                          inputs=gr.inputs.Image(type="pil"),
+                          outputs=outputs,
+                          examples=examples,
+                          enable_queue=True)
+ interface.launch(debug=True)
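For reference, the captioning path in app.py can be exercised outside the Gradio interface. Below is a minimal sketch, not part of the commit, that runs just the BLIP-base model from the code above on one of the committed example images; it assumes the same transformers/torch environment and mirrors what generate_caption() does:

# Minimal sketch: caption a single image with BLIP-base, mirroring
# generate_caption() in app.py. "test-2.jpeg" is one of the committed examples.
from PIL import Image
import torch
from transformers import AutoProcessor, BlipForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(device)

image = Image.open("test-2.jpeg").convert("RGB")
inputs = processor(images=image, return_tensors="pt").to(device)   # pixel values, moved to the model's device
generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])  # the decoded caption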
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ git+https://github.com/huggingface/transformers.git
+ torch
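Note that transformers is installed straight from the main branch on GitHub rather than a pinned release (presumably because some of the model classes used in app.py were not yet in a stable release at the time), and torch is left unpinned as well, so rebuilding the Space later may pick up different library versions.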
test-1.jpeg ADDED
test-2.jpeg ADDED
test-3.jpeg ADDED
test-4.jpeg ADDED
test-5.jpeg ADDED
test-6.jpg ADDED

Git LFS Details

  • SHA256: cc7d664dca740b71bc13345ece39b3017a791af8b3f082bcb0148fe032c2235f
  • Pointer size: 131 Bytes
  • Size of remote file: 675 kB
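The three added lines counted for test-6.jpg in the file list are the LFS pointer itself, which is all that Git stores for an LFS-tracked file. Given the details above, the pointer should read roughly as follows (the exact byte count on the size line is not shown on this page):

version https://git-lfs.github.com/spec/v1
oid sha256:cc7d664dca740b71bc13345ece39b3017a791af8b3f082bcb0148fe032c2235f
size <exact size in bytes, about 675 kB>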