aps committed on
Commit
657fc3f
·
0 Parent(s):

Init commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ unsplash-dataset/features.npy filter=lfs diff=lfs merge=lfs -text
29
+ unsplash-dataset filter=lfs diff=lfs merge=lfs -text
30
+ styles filter=lfs diff=lfs merge=lfs -text
31
+ styles/starry.jpeg filter=lfs diff=lfs merge=lfs -text
32
+ styles/mona1.jpeg filter=lfs diff=lfs merge=lfs -text
33
+ unsplash-dataset/photos.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ gradio_queue.db
README.md ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: FLAVA Neural Style Transfer
3
+ emoji: 🔥
4
+ colorFrom: pink
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ app_file: app.py
8
+ pinned: true
9
+ ---
10
+
11
+ # Configuration
12
+
13
+ `title`: _string_
14
+ Display title for the Space
15
+
16
+ `emoji`: _string_
17
+ Space emoji (emoji-only character allowed)
18
+
19
+ `colorFrom`: _string_
20
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
+
22
+ `colorTo`: _string_
23
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
+
25
+ `sdk`: _string_
26
+ Can be either `gradio` or `streamlit`
27
+
28
+ `sdk_version` : _string_
29
+ Only applicable for `streamlit` SDK.
30
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
+
32
+ `app_file`: _string_
33
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code).
34
+ Path is relative to the root of the repository.
35
+
36
+ `pinned`: _boolean_
37
+ Whether the Space stays on top of your list.
app.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from io import BytesIO
3
+ import requests
4
+ from datetime import datetime
5
+ import random
6
+
7
+ # Interface utilities
8
+ import gradio as gr
9
+
10
+ # Data utilities
11
+ import numpy as np
12
+ import pandas as pd
13
+
14
+ # Image utilities
15
+ from PIL import Image
16
+ import cv2
17
+
18
+ # FLAVA Model
19
+ import torch
20
+ from transformers import BertTokenizer, FlavaModel
21
+
22
+ # Style Transfer Model
23
+ import paddlehub as hub
24
+
25
+
26
+
27
+ os.system("hub install stylepro_artistic==1.0.1")
28
+ stylepro_artistic = hub.Module(name="stylepro_artistic")
29
+
30
+
31
+
32
+ # FLAVA Model
33
+ device = "cuda" if torch.cuda.is_available() else "cpu"
34
+ model = FlavaModel.from_pretrained("facebook/flava-full")
35
+ tokenizer = BertTokenizer.from_pretrained("facebook/flava-full")
36
+ model = model.to(device)
37
+
38
+ # Load Data
39
+ photo_features = np.load("unsplash-dataset/features.npy")
40
+ photo_data = pd.read_csv("unsplash-dataset/photos.csv")
41
+
42
+ def image_from_text(text_input):
43
+ start=datetime.now()
44
+
45
+ ## Inference
46
+ with torch.no_grad():
47
+ inputs = tokenizer([text_input], padding=True, return_tensors="pt").to(device)
48
+ text_features = model.get_text_features(**inputs)[:, 0, :].cpu().numpy()
49
+
50
+ ## Find similarity
51
+ similarities = list((text_features @ photo_features.T).squeeze(0))
52
+
53
+ ## Return best image :)
54
+ idx = sorted(zip(similarities, range(photo_features.shape[0])), key=lambda x: x[0], reverse=True)[0][1]
55
+ photo = photo_data.iloc[idx]
56
+
57
+ print(f"Time spent at FLAVA: {datetime.now()-start}")
58
+
59
+ start=datetime.now()
60
+ # Downlaod image
61
+ response = requests.get(photo["path"])
62
+ pil_image = Image.open(BytesIO(response.content)).convert("RGB")
63
+ open_cv_image = np.array(pil_image)
64
+ # Convert RGB to BGR
65
+ open_cv_image = open_cv_image[:, :, ::-1].copy()
66
+
67
+ print(f"Time spent at Image request: {datetime.now()-start}")
68
+
69
+ return open_cv_image
70
+
71
+ def inference(content, style):
72
+ content_image = image_from_text(content)
73
+ start=datetime.now()
74
+
75
+ result = stylepro_artistic.style_transfer(
76
+ images=[{
77
+ "content": content_image,
78
+ "styles": [cv2.imread(style.name)]
79
+ }])
80
+
81
+ print(f"Time spent at Style Transfer: {datetime.now()-start}")
82
+ return Image.fromarray(np.uint8(result[0]["data"])[:,:,::-1]).convert("RGB")
83
+
84
+ if __name__ == "__main__":
85
+ title = "FLAVA Neural Style Transfer"
86
+ description = "Gradio demo for Neural Style Transfer. Inspired from <a href='https://huggingface.co/spaces/WaterKnight/neural-style-transfer'>this demo for CLIP</a>. To use it, simply enter the text for image content and upload style image. Read more at the links below."
87
+ article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2003.07694'target='_blank'>Parameter-Free Style Projection for Arbitrary Style Transfer</a> | <a href='https://github.com/PaddlePaddle/PaddleHub' target='_blank'>Github Repo</a></br><a href='https://arxiv.org/abs/2112.04482' target='_blank'>FLAVA paper</a> | <a href='https://huggingface.co/transformers/model_doc/flava.html' target='_blank'>Hugging Face FLAVA Implementation</a></p>"
88
+ examples=[
89
+ ["a cute kangaroo", "styles/starry.jpeg"],
90
+ ["man holding beer", "styles/mona1.jpeg"],
91
+ ]
92
+ demo = gr.Interface(inference,
93
+ inputs=[
94
+ gr.inputs.Textbox(lines=1, placeholder="Describe the content of the image", default="a cute kangaroo", label="Describe the image to which the style will be applied"),
95
+ gr.inputs.Image(type="file", label="Style to be applied"),
96
+ ],
97
+ outputs=gr.outputs.Image(type="pil"),
98
+ enable_queue=True,
99
+ title=title,
100
+ description=description,
101
+ article=article,
102
+ examples=examples
103
+ )
104
+ demo.launch(share=True)
packages.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ ffmpeg
2
+ libsm6
3
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ paddlepaddle
2
+ paddlehub
3
+ transformers
4
+ torch
styles/mona1.jpeg ADDED

Git LFS Details

  • SHA256: dc8c1ade58e729a21ad58384d9054ae8f7478deef22a758b0a741122649da797
  • Pointer size: 131 Bytes
  • Size of remote file: 598 kB
styles/starry.jpeg ADDED

Git LFS Details

  • SHA256: 0191ce372cd452a043e69385079813f7a13d13e5e22f05b8506d9b36982e758f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.36 MB
unsplash-dataset/features.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17f7b7a1f297f314f3728eb50e16a18780263fa9ec99b8286c58c5fb4b6853df
3
+ size 153354368
unsplash-dataset/photos.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ad7fe00e233cf6c210083e7f3497927b322dfb8d194152be126b8574e428056
3
+ size 3860383