SolidUI / app.py
import gradio as gr
import matplotlib.pyplot as plt
import io
import numpy as np
from PIL import Image
import requests
import json
import re
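
# SolidUI demo app: the user describes a chart in natural language, a chat model is
# asked to generate matplotlib code for it, and that code is executed to render the
# figure shown in the Gradio UI.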
# Execute the generated Python code and render the resulting image.
def execute_code(code):
    namespace = {}
    exec(code, namespace)
    fig = namespace.get('fig')  # the generated code is expected to assign a matplotlib figure to 'fig'
    if fig:
        # Render the figure to an in-memory PNG and hand it back as a PIL image,
        # which the Gradio Image component can display directly.
        img_byte_arr = io.BytesIO()
        fig.savefig(img_byte_arr, format='png')
        img_byte_arr.seek(0)
        return Image.open(img_byte_arr)
    else:
        raise ValueError("The code did not generate a matplotlib figure named 'fig'")
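
# A minimal sketch of the kind of snippet execute_code expects from the model
# (illustrative only, not part of the app):
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   ax.plot([1, 2, 3], [4, 5, 6])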

def gpt_inference(base_url, model, openai_key, prompt):
    newprompt = f'Write Python code that does the following: \n\n{prompt}\n\nNote, the code is going to be executed in a Jupyter Python kernel. The code should create a matplotlib figure and assign it to a variable named "fig". The "fig" variable will be used for further processing.\n\nLast instruction, and this is the most important, just return code. No other outputs, as your full response will directly be executed in the kernel.'
    data = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": newprompt
            }
        ],
        "temperature": 0.7,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_key}",
    }
    response = requests.post(f"{base_url}/v1/chat/completions", headers=headers, data=json.dumps(data))
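    # The request follows the OpenAI Chat Completions wire format, so base_url can point
    # at api.openai.com or at any compatible endpoint that serves /v1/chat/completions.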
    def extract_code(text):
        # Prefer a triple-backtick code block if the model returned one
        triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
        if triple_match:
            return triple_match.group(1).strip()
        else:
            # Otherwise fall back to a single-backtick span
            single_match = re.search(r'`(.+?)`', text, re.DOTALL)
            if single_match:
                return single_match.group(1).strip()
        # If no code block is found, return the original text unchanged
        return text

    if response.status_code != 200:
        raise ValueError(f"OpenAI API request failed ({response.status_code}): {response.text}")
    code = extract_code(response.json()["choices"][0]["message"]["content"])
    print(f"code:{code}")
    img = execute_code(code)
    return img
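
# Example call (hypothetical values, shown for illustration):
#   img = gpt_inference("https://api.openai.com", "gpt-3.5-turbo", "sk-...", "plot a sine wave")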

iface = gr.Interface(
    fn=gpt_inference,
    inputs=[
        gr.Textbox(label="Base URL"),
        gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4"], label="Model"),
        gr.Textbox(label="OpenAI Key"),
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(label="Figure"),
)
iface.launch()
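# launch() starts a local Gradio server, by default at http://127.0.0.1:7860.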