from __future__ import annotations

import json
import os
import re
import traceback
import uuid
from copy import deepcopy

import gradio as gr


def stream_predict(
        chatbot,            # Chatbot component state
        user_input,         # Textbox content
        upload_image_url,   # URL of the uploaded image, if any
        agent               # agent instance
):
    print(f'upload_image_url: {upload_image_url}')
    if not user_input:
        if len(chatbot) == 0:
            yield chatbot, 'Please enter a question...'
            return
        else:
            # Empty input with existing history: regenerate the last answer.
            user_input = chatbot[-1][0]
            chatbot = chatbot[:-1]
            yield chatbot, 'Regenerating the answer...'
    else:
        print(upload_image_url)
        if upload_image_url:
            # Prepend the uploaded image to the prompt so it is shown in the chat.
            user_input = f'<img src="{upload_image_url}"/>\n' + user_input
        print('user_input:', user_input)
        yield chatbot, 'Generating the answer...'
    chatbot.append((user_input, 'Processing...'))
    yield chatbot, 'Streaming the answer...'
    response = ''
    try:
        for frame in agent.stream_run(user_input, remote=False):
            is_final = frame.get('is_final')
            llm_result = frame.get('llm_text', '')
            exec_result = frame.get('exec_result', '')
            error_message = frame.get('error')
            if is_final:
                chatbot[-1] = (chatbot[-1][0], response)
                yield chatbot, 'Answer complete'
                break
            elif error_message:
                chatbot[-1] = (chatbot[-1][0], error_message)
                yield chatbot, ''
            else:
                if len(exec_result) != 0:
                    # Tool execution result: wrap it in the exec markers.
                    frame_text = f'\n\n<|startofexec|>{exec_result}<|endofexec|>\n'
                else:
                    # Incremental LLM output.
                    frame_text = llm_result
                response = f'{response}{frame_text}'
                chatbot[-1] = (chatbot[-1][0], response)
                yield chatbot, ''
    except Exception:
        traceback.print_exc()
        chatbot[-1] = (chatbot[-1][0], 'chat_async error.')
        yield chatbot, ''


def upload_image(file):
    # Expose the uploaded file through Gradio's built-in `file=` serving route.
    gr_file_path = f'./file={file.name}'
    return [
        gr.HTML.update(
            f'<div class="uploaded-image-box"><img src="{gr_file_path}"></div>',
            visible=True),
        gr_file_path
    ]
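

# --- Usage sketch (assumption, not part of the original file) ----------------
# A minimal illustration of how stream_predict and upload_image could be wired
# into a Gradio Blocks UI. `build_agent()` is a hypothetical placeholder for
# whatever constructs the agent, and the component names are assumptions.
#
# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot()
#     status = gr.Textbox(label='Status', interactive=False)
#     user_input = gr.Textbox(label='Input')
#     uploaded_image_html = gr.HTML(visible=False)
#     upload_image_url = gr.State('')
#     agent_state = gr.State(build_agent())
#     upload_button = gr.UploadButton('Upload image', file_types=['image'])
#
#     upload_button.upload(upload_image, upload_button,
#                          [uploaded_image_html, upload_image_url])
#     user_input.submit(stream_predict,
#                       [chatbot, user_input, upload_image_url, agent_state],
#                       [chatbot, status])
#
# demo.queue().launch()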