Update app.py
app.py CHANGED
@@ -20,6 +20,7 @@ logging.getLogger("http").setLevel(logging.WARNING)
 logging.getLogger("httpx").setLevel(logging.WARNING)
 
 import gradio as gr
+import spaces
 
 from conversation import default_conversation, conv_templates, SeparatorStyle
 
@@ -239,7 +240,7 @@ def stream_response(model, inputs, streamer, prompt, gen_kwargs):
         yield generated_text
 
 
-
+@spaces.GPU
 def http_chat_bot(state, temperature, top_k, top_p, max_new_tokens):
     global model, args, streamer  # Use global model and args
     logging.info("http_chat_bot.")
@@ -317,6 +318,7 @@ def http_chat_bot(state, temperature, top_k, top_p, max_new_tokens):
     return (state, state.to_gradio_chatbot()) + (enable_btn,) * 2
 
 
+@spaces.GPU
 def http_gen_edit_bot(state, temperature, top_k, top_p, image_gen_temperature,
                       image_gen_top_k, image_gen_top_p, max_output_tokens,
                       llm_cfg_scale, resolution_wh, use_diffusion, diffusion_cfg_scale, diffusion_num_inference_steps):
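For reference, this commit follows the standard Hugging Face ZeroGPU pattern: import the spaces package and decorate each GPU-bound Gradio handler with @spaces.GPU, so a GPU is attached only while that call runs. Both http_chat_bot and http_gen_edit_bot run model inference, which is presumably why each gets its own decorator. Below is a minimal, self-contained sketch of the pattern; the toy model and the predict handler are illustrative stand-ins, not code from this Space's app.py.

import gradio as gr
import spaces
import torch

# Toy model created once at startup (stand-in for the global model in app.py).
model = torch.nn.Linear(8, 1)

@spaces.GPU  # ZeroGPU attaches a GPU for the duration of each decorated call
def predict(seed: float) -> float:
    # CUDA is only guaranteed to be usable inside the decorated function.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    x = torch.full((1, 8), float(seed), device=device)
    with torch.no_grad():
        return float(model(x).item())

demo = gr.Interface(
    fn=predict,
    inputs=gr.Number(label="seed"),
    outputs=gr.Number(label="score"),
)

if __name__ == "__main__":
    demo.launch()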