lionelgarnier committed · Commit 08f5d28 · 1 Parent(s): e69d279

test pre integrate mistral

Files changed:
- app.py +53 -4
- requirements.txt +2 -0
app.py CHANGED

@@ -4,6 +4,10 @@ import random
 import spaces
 import torch
 from diffusers import DiffusionPipeline
+from huggingface_hub import InferenceClient
+
+client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
+
 
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
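The commit stops at creating the client ("test pre integrate mistral"), so nothing calls Mistral yet. Below is a minimal sketch of how the client could refine a user prompt before it reaches the Flux pipeline; the `refine_prompt` helper and its instruction text are assumptions, not part of this commit, and the sampling values simply mirror the slider defaults added further down in the diff.

```python
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

# Hypothetical helper, not in this commit: asks Mistral to expand a short
# user idea into a detailed image-generation prompt for Flux.
def refine_prompt(user_prompt: str) -> str:
    instruction = (
        "Rewrite this idea as a detailed image-generation prompt: "
        + user_prompt
    )
    # text_generation() is the standard InferenceClient text endpoint;
    # the sampling values match the Mistral slider defaults in this diff.
    return client.text_generation(
        instruction,
        max_new_tokens=256,
        temperature=0.9,
        top_p=0.90,
        repetition_penalty=1.2,
    )
```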
@@ -44,9 +48,8 @@ css="""
 with gr.Blocks(css=css) as demo:
     
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""# 
-        
-        [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-schnell)]
+        gr.Markdown(f"""# Text to Product
+        Using Mistral + Flux + Trellis
         """)
         
         with gr.Row():
@@ -61,9 +64,55 @@ with gr.Blocks(css=css) as demo:
         
         run_button = gr.Button("Run", scale=0)
         
+        refined_prompt = gr.Text(
+            label="Refined Prompt",
+            show_label=False,
+            max_lines=10,
+            placeholder="Prompt refined by Mistral",
+            container=False
+        )
+
         result = gr.Image(label="Result", show_label=False)
         
-        with gr.Accordion("Advanced Settings", open=False):
+        with gr.Accordion("Advanced Settings Mistral", open=False):
+            gr.Slider(
+                label="Temperature",
+                value=0.9,
+                minimum=0.0,
+                maximum=1.0,
+                step=0.05,
+                interactive=True,
+                info="Higher values produce more diverse outputs",
+            )
+            gr.Slider(
+                label="Max new tokens",
+                value=256,
+                minimum=0,
+                maximum=1048,
+                step=64,
+                interactive=True,
+                info="The maximum number of new tokens",
+            )
+            gr.Slider(
+                label="Top-p (nucleus sampling)",
+                value=0.90,
+                minimum=0.0,
+                maximum=1.0,
+                step=0.05,
+                interactive=True,
+                info="Higher values sample more low-probability tokens",
+            )
+            gr.Slider(
+                label="Repetition penalty",
+                value=1.2,
+                minimum=1.0,
+                maximum=2.0,
+                step=0.05,
+                interactive=True,
+                info="Penalize repeated tokens",
+            )
+
+        with gr.Accordion("Advanced Settings Flux", open=False):
         
             seed = gr.Slider(
                 label="Seed",
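As committed, the four Mistral sliders are anonymous (never assigned to variables), so their values cannot reach the client, and `refined_prompt` is not bound to any event yet. A hedged sketch of how the wiring might look once integration lands; the `prompt` textbox name, the slider variables, and the `refine` handler are assumptions built on the names this diff does introduce (`client`, `run_button`, `refined_prompt`).

```python
# Sketch only: assumes a `prompt` gr.Text input exists (as in the stock
# Flux demo) and that the Mistral sliders get assigned to variables.
temperature = gr.Slider(label="Temperature", value=0.9,
                        minimum=0.0, maximum=1.0, step=0.05)
max_new_tokens = gr.Slider(label="Max new tokens", value=256,
                           minimum=0, maximum=1048, step=64)

def refine(user_prompt, temperature, max_new_tokens):
    # Route the slider values into the Mistral call.
    return client.text_generation(
        user_prompt,
        temperature=temperature,
        max_new_tokens=int(max_new_tokens),
    )

# Show Mistral's rewrite in the new refined_prompt box when Run is clicked.
run_button.click(
    fn=refine,
    inputs=[prompt, temperature, max_new_tokens],
    outputs=[refined_prompt],
)
```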
requirements.txt CHANGED

@@ -1,5 +1,7 @@
 --extra-index-url https://download.pytorch.org/whl/cu121
 
+huggingface_hub
+mistral_inference
 torch==2.4.0
 torchvision==0.19.0
 pillow==10.4.0
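Both new requirements are unpinned, unlike the existing torch pins. Note that `app.py` as changed only uses `InferenceClient`, which calls the hosted inference API; `mistral_inference` would only matter for running the model locally. A quick smoke test for the two new dependencies, assuming the updated requirements are installed; this snippet is an illustration, not part of the commit.

```python
# Sanity check for the two new dependencies (not part of the commit).
from huggingface_hub import InferenceClient
import mistral_inference  # noqa: F401 -- verifies the install only

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
print(client.text_generation("ping", max_new_tokens=4))
```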