Update app.py
app.py
CHANGED
@@ -1,103 +1,82 @@
-from fastapi import FastAPI
-import gradio as gr
-
-CUSTOM_PATH = "/gradio"
-
-app = FastAPI()
-
-@app.get("/")
-def read_main():
-    return {"message": "This is your main app"}
-
-# Newly added /test route
-@app.get("/test")
-def test_api():
-    return {"message": "This is the test API"}
-
-io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
-app = gr.mount_gradio_app(app, io, path=CUSTOM_PATH)
-
-# Run this FastAPI app with `uvicorn run:app`, then visit http://localhost:8000/gradio and http://localhost:8000/test
+import gradio as gr
+import spaces
+from PIL import Image
+import torch
+from transformers import AutoModelForCausalLM, AutoProcessor
+
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+model = AutoModelForCausalLM.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True).to(device)
+processor = AutoProcessor.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True)
+
+@spaces.GPU(duration=180)
+def infer(prompt, image):
+    # Downscale so the longer side is at most 256 px, preserving aspect ratio
+    max_size = 256
+    width, height = image.size
+    if width > height:
+        new_width = max_size
+        new_height = int((new_width / width) * height)
+    else:
+        new_height = max_size
+        new_width = int((new_height / height) * width)
+
+    image = image.resize((new_width, new_height), Image.LANCZOS)
+
+    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
+
+    generated_ids = model.generate(
+        input_ids=inputs["input_ids"],
+        pixel_values=inputs["pixel_values"],
+        max_new_tokens=1024,
+        do_sample=False,
+        num_beams=3
+    )
+
+    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
+
+    # The prompt doubles as the Florence-2 task token during post-processing
+    parsed_answer = processor.post_process_generation(generated_text, task=prompt, image_size=(image.width, image.height))
+
+    return parsed_answer
+
+css = """
+#col-container {
+    margin: 0 auto;
+    max-width: 800px;
+}
+"""
+
+with gr.Blocks(css=css) as app:
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown("""# Tag The Image
+        Get tags for an image using the Florence-2-base-PromptGen-v1.5 model.
+        """)
+
+        with gr.Row():
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt or leave it blank.",
+                container=False,
+            )
+            image_input = gr.Image(
+                label="Image",
+                type="pil",
+                show_label=False,
+                container=False,
+            )
+            run_button = gr.Button("Run", scale=0)
+
+        result = gr.Textbox(label="Generated Text", show_label=False)
+
+        gr.on(
+            triggers=[run_button.click, prompt.submit],
+            fn=infer,
+            inputs=[prompt, image_input],
+            outputs=[result]
+        )
+
+app.queue()
+app.launch(show_error=True)
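
In short, this commit replaces the FastAPI app with a mounted Gradio demo by a standalone Gradio Blocks app: the new infer handler downscales the upload so its longer side is at most 256 px while keeping the aspect ratio (a 1024×768 image becomes 256×192, since int((256 / 1024) * 768) = 192), runs Florence-2-base-PromptGen-v1.5 with beam search (num_beams=3, up to 1024 new tokens), and post-processes the raw generation using the prompt itself as the task token.

A minimal client sketch for exercising the updated app, assuming it is running locally on Gradio's default port and that the endpoint is exposed under the handler's function name ("/infer"); the task token "<GENERATE_TAGS>" and the file "example.jpg" are illustrative assumptions, not part of the commit.

from gradio_client import Client, handle_file

# Connect to the locally running Blocks app (default port; an assumption)
client = Client("http://127.0.0.1:7860/")

result = client.predict(
    "<GENERATE_TAGS>",           # prompt: a PromptGen-style task token (assumed)
    handle_file("example.jpg"),  # image: hypothetical local file to upload
    api_name="/infer",           # assumed endpoint name derived from fn=infer
)
print(result)

With the FastAPI mount gone, the removed trailing comment's `uvicorn run:app` instruction no longer applies: locally the file launches with `python app.py` (it calls `app.launch(...)` at module level), and on a ZeroGPU Space the `@spaces.GPU(duration=180)` decorator requests a GPU for at most 180 seconds per call.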