# NOTE: removed non-code artifact lines here (file-size / git-blame / line-number
# gutter text captured by a web scrape; they were not valid Python).
import gradio as gr
import matplotlib.pyplot as plt
import io
import numpy as np
from PIL import Image
import requests
import json
import re
# Convert a matplotlib figure to a PIL image so Gradio can display it
def get_image_data(fig):
    """Render a matplotlib figure to an in-memory PIL image.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        The figure to rasterize.

    Returns
    -------
    PIL.Image.Image
        The rendered figure, fully loaded into memory and independent of
        the temporary buffer.
    """
    buf = io.BytesIO()
    fig.savefig(buf, format='PNG')
    buf.seek(0)
    img = Image.open(buf)
    # Force PIL to decode the pixel data now: Image.open is lazy and would
    # otherwise keep reading from `buf` after we return.
    img.load()
    # Close the figure so pyplot does not accumulate one figure per request
    # (a memory leak in a long-running server process).
    plt.close(fig)
    return img
# Execute Python code and produce the resulting image
def execute_code(code):
    """Execute generated Python code and return the figure it draws.

    SECURITY: ``exec`` runs arbitrary code with full interpreter
    privileges. Executing LLM output is this app's core feature, but it
    must never be exposed to untrusted users without sandboxing.

    Parameters
    ----------
    code : str
        Python source that is expected to create a matplotlib figure
        bound to a variable named ``fig``.

    Returns
    -------
    PIL.Image.Image
        The rendered figure.

    Raises
    ------
    ValueError
        If the executed code does not define a variable named ``fig``.
    """
    namespace = {}
    exec(code, namespace)  # deliberate — see security note above
    fig = namespace.get('fig')
    # Explicit None check: we care about the variable's *presence*, not
    # the figure object's truthiness.
    if fig is not None:
        return get_image_data(fig)
    raise ValueError("The code did not generate a matplotlib figure named 'fig'")
def _extract_code(text):
    """Pull the first code block out of an LLM response.

    Prefers a triple-backtick fenced block (optionally language-tagged),
    falls back to a single-backtick span, and returns the text unchanged
    when no fences are found.
    """
    triple_match = re.search(r'```(?:\w+\n)?(.+?)```', text, re.DOTALL)
    if triple_match:
        return triple_match.group(1).strip()
    single_match = re.search(r'`(.+?)`', text, re.DOTALL)
    if single_match:
        return single_match.group(1).strip()
    return text


def gpt_inference(base_url, model, openai_key, prompt):
    """Ask an OpenAI-compatible chat endpoint for plotting code and run it.

    Parameters
    ----------
    base_url : str
        Root URL of the API (``{base_url}/v1/chat/completions`` is called).
    model : str
        Model identifier to request.
    openai_key : str
        Bearer token for the ``Authorization`` header.
    prompt : str
        User description of the plot to generate.

    Returns
    -------
    PIL.Image.Image
        The rendered figure on success.
    tuple[str, int]
        ``("Error: ...", 500)`` when the API responds with a non-200
        status (quirk preserved from the original interface).
    """
    newprompt = f'Write Python code that does the following: \n\n{prompt}\n\nNote, the code is going to be executed in a Jupyter Python kernel.\n\nLast instruction, and this is the most important, just return code. No other outputs, as your full response will directly be executed in the kernel.'
    data = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": newprompt
            }
        ],
        "temperature": 0.7,
    }
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_key}",
    }
    response = requests.post(
        f"{base_url}/v1/chat/completions",
        headers=headers,
        json=data,    # requests serializes this — same as data=json.dumps(data)
        timeout=120,  # fail instead of hanging the UI forever on a stalled endpoint
    )
    if response.status_code != 200:
        return "Error: " + response.text, 500
    code = _extract_code(response.json()["choices"][0]["message"]["content"])
    return execute_code(code)
# Build the web UI. The four inputs map positionally onto gpt_inference's
# signature: (base_url, model, openai_key, prompt).
# NOTE: the original used the pre-3.x gr.inputs/gr.outputs namespaces (removed
# in modern Gradio) and a non-existent `input_labels` kwarg, so the labels were
# never applied; labels now live on the components themselves. Model choices
# are corrected to the real OpenAI model IDs.
iface = gr.Interface(
    fn=gpt_inference,
    inputs=[
        gr.Textbox(label="Base URL"),
        gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4"], label="Model"),
        gr.Textbox(label="OpenAI Key", type="password"),  # mask the secret
        gr.Textbox(label="Prompt"),
    ],
    outputs=gr.Image(type="pil"),
)
iface.launch()