import gradio as gr
import os
import json
import requests
API_URL = "https://api.openai.com/v1/chat/completions"
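
# predict() sends the user's topic to the Chat Completions endpoint with
# stream=True, assembles the narration text from the streamed deltas, and
# returns the updated chatbot pairs, history, and counter to Gradio.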
def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]):
    # Ask the model for a professional video narration written in Korean only:
    # no questions, instructions, or background explanation, just the narration,
    # grouped two lines at a time and at most eight lines long.
    narration_prompt = (
        "Write a professional narration to be used in a video. It must be written in Korean. "
        "Do not expose or output any questions, instructions, or background explanations; "
        "output only the pure narration, grouped two lines at a time, within eight lines total. "
        f"Input: '{inputs}'"
    )
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}"
    }
    payload = {
        "model": "gpt-4-1106-preview",
        "messages": [{"role": "system", "content": narration_prompt}],
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
        "max_tokens": 1000
    }
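    # stream=True asks the API to return the completion as incremental
    # "delta" chunks instead of a single full message.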
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    partial_words = ""
    token_counter = 0
    history.append(inputs)  # keep the user turn so the chatbot pairs line up
    try:
        for chunk in response.iter_lines():
            if not chunk:
                continue
            try:
                chunk_text = chunk.decode()
                # Streamed responses arrive as server-sent events: "data: {...}"
                # lines followed by a final "data: [DONE]" sentinel.
                if chunk_text.startswith("data: "):
                    chunk_text = chunk_text[len("data: "):]
                if chunk_text.strip() == "[DONE]":
                    break
                if chunk_text.strip().startswith("{"):
                    chunk_data = json.loads(chunk_text)  # parse one streamed delta
                    delta = chunk_data["choices"][0]["delta"]
                    if "content" in delta:
                        partial_words += delta["content"]
                        if token_counter == 0:
                            history.append(" " + partial_words)
                        else:
                            history[-1] = partial_words
                        chatbot = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
                        token_counter += 1
                else:
                    print("Unexpected format:", chunk_text)
            except json.JSONDecodeError as e:
                print("JSON parsing error:", e)
            except Exception as e:
                print("Error while handling a chunk:", e)
    except Exception as e:
        print("Error while handling the response:", e)
    chat_counter += 1  # one completed narration per call
    return chatbot, history, chat_counter
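
# The three values returned above feed the Chatbot component, the hidden
# gr.State that keeps the running history, and the hidden chat counter wired up below.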
# The remaining parts of the code (interface creation, adding examples,
# launching the app with demo.launch(), etc.) are kept as before.
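# Clears the input textbox; wired to run right after each submit/click below.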
def reset_textbox():
    return gr.update(value='')
title = """<h1 align='center'>Narration Script</h1>"""
description = "AI automatically writes a script for video production. Just briefly enter what is needed, such as the key keywords or the purpose."
with gr.Blocks(css="#col_container {width: 1000px; margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}") as demo:
    gr.HTML(title)
    gr.Markdown(description)
    with gr.Column(elem_id="col_container"):
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
        chatbot = gr.Chatbot(elem_id='chatbot')
        inputs = gr.Textbox(placeholder="Type here.", label="Enter the topic or sentence you want a narration script written for.")
        state = gr.State([])
        b1 = gr.Button("Generate")
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Top-p (nucleus sampling)")
            temperature = gr.Slider(minimum=0, maximum=2.0, value=1.0, step=0.1, label="Temperature")  # the API accepts temperature in [0, 2]
            chat_counter = gr.Number(value=0, visible=False, precision=0)
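    # Example prompts; clicking one pre-fills the input textbox.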
    examples = gr.Examples(examples=[
        ["Product description: the newly launched 'Tory' lip balm is FDA approved, has the best moisturizing power, and is No. 1 in purchase rankings"],
        ["Branding: the 'Tory' lip balm needs branding that appeals to women in their 20s"],
        ["Advertisement: for today's gift to parents and relatives, Beopseongpo barley gulbi 'Beopseong Gulbi' is the best choice."],
        ["Information sharing: taking too much vitamin C is actually harmful to your health."],
        ["Promotion: 'OpenAI' has launched the custom GPT 'Store' for 'ChatGPT'."],
        ["Greeting: an ambitious 2024 New Year's greeting for the company's customers and employees"]
    ], inputs=[inputs])
    inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter])
    b1.click(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter])
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])
demo.launch(debug=True)
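
# To run locally (assuming this file is saved as app.py and gradio/requests are installed):
#   python app.py
# Open the printed local URL, paste an OpenAI API key, and enter a topic or sentence.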