Eaglebelt committed (verified)
Commit 23a2028 · 1 Parent(s): 24215c3

Upload app.py

Files changed (1): app.py (+46, -19)
app.py CHANGED
@@ -1,27 +1,38 @@
 import os
 import io
-import PIL.Image  # Workaround for PIL/Gradio bug
+import PIL.Image  # Workaround for PIL/Gradio bug
 import gradio as gr
 from gradio_client import Client, handle_file
 
+from gradio_client.client import re
 from numpy import array
 # 1. Load your HF token from env
 HF_TOKEN = os.getenv("HF_TOKEN")  # export HF_TOKEN="hf_..."
 # 1) Connect to the Leffa Gradio app's predict endpoint
 # Use the full "/call/predict" API path as shown on the View API page
-client = Client("franciszzj/Leffa", hf_token=HF_TOKEN, )  # Gradio Python client
+client = Client(
+    "franciszzj/Leffa",
+    hf_token=HF_TOKEN,
+)  # Gradio Python client
 
-def virtual_tryon(person_path, garment_path):
-    # 2) Wrap file inputs so Gradio client uploads them correctly
-    person_file = handle_file(person_path)  # handle_file uploads the image
+
+def virtual_tryon(
+    person_path,
+    garment_path,
+    vt_garment_type="upper_body",
+):
+    # 2) Wrap file inputs so Gradio client uploads them correctly
+    person_file = handle_file(
+        person_path
+    )  # handle_file uploads the image
     garment_file = handle_file(garment_path)
 
-    # 3) Build inputs in the exact order shown on the "Use via API" page
+    # 3) Build inputs in the exact order shown on the "Use via API" page
 
-    # 4) Call the named endpoint with handle_file inputs
+    # 4) Call the named endpoint with handle_file inputs
     result = client.predict(
-        person_file,   # Person Image
-        garment_file,  # Garment Image
+        person_file,   # Person Image
+        garment_file,  # Garment Image
         ref_acceleration=False,
         step=30,
         scale=2.5,
@@ -29,22 +40,38 @@ def virtual_tryon(person_path, garment_path):
         vt_model_type="viton_hd",
         vt_garment_type="upper_body",
         vt_repaint=False,
-        api_name="/leffa_predict_vt"
-    )
-    # result[0] is the generated image filepath on the server
+        api_name="/leffa_predict_vt")
+    # result[0] is the generated image filepath on the server
     return result[0]  # Gradio will download & display this file
 
 # 5) Gradio UI
+
+
 with gr.Blocks() as demo:
-    gr.Markdown("## V_TRY DEMO")
+    gr.Markdown("## Leffa Virtual Try-On")
     with gr.Row():
         src = gr.Image(sources="upload", type="filepath", label="Person Image")
-        ref = gr.Image(sources="upload", type="filepath", label="Garment Image")
+        ref = gr.Image(sources="upload",
+                       type="filepath",
+                       label="Garment Image")
+        vt_garment_type = gr.Radio(
+            label="Garment Type",
+            choices=[("Upper", "upper_body"), ("Lower", "lower_body"),
+                     ("Dress", "dresses")],
+            value="upper_body",
+        )
+
     with gr.Column():
-        out = gr.Image(type="filepath", label="Result", )
+        out = gr.Image(
+            type="filepath",
+            label="Result",
+        )
     btn = gr.Button("Generate")
-    btn.click(virtual_tryon, [src, ref], out)
+    btn.click(virtual_tryon, [src, ref, vt_garment_type], out)
 
-demo.launch(share=True,
-            show_error=True,
-            pwa=True,)
+
+demo.launch(
+    share=True,
+    show_error=True,
+    pwa=True,
+)
77