seawolf2357 committed
Commit 00bec19 · verified · 1 Parent(s): 811d879

Update app.py

Files changed (1)
  1. app.py +65 -122
app.py CHANGED
@@ -1,19 +1,18 @@
  import gradio as gr
- import os
- import json
  import requests
 
- #Streaming endpoint
- API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
-
- #Testing with my Open AI Key
- OPENAI_API_KEY = os.getenv("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")
 
  def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]):
-
-     # Convert the user's input into a narration-style prompt
      narration_prompt = f"Write a professional narration to be used in the video. It must be written in Korean. Do not expose or output any stage directions, instructions, or background explanations; output only pure narration, grouped two lines at a time, within a maximum of 8 lines. Input: '{inputs}'"
 
      payload = {
          "model": "gpt-4-1106-preview",
          "messages": [{"role": "system", "content": narration_prompt}],
@@ -23,134 +22,78 @@ def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[]
          "stream": True,
          "presence_penalty": 0,
          "frequency_penalty": 0,
-         "max_tokens": 1000  # limit the length of the narration
      }
 
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {openai_api_key}"
-     }
 
-     print(f"chat_counter - {chat_counter}")
-     if chat_counter != 0 :
-         messages=[]
-         for data in chatbot:
-             temp1 = {}
-             temp1["role"] = "user"
-             temp1["content"] = data[0]
-             temp2 = {}
-             temp2["role"] = "assistant"
-             temp2["content"] = data[1]
-             messages.append(temp1)
-             messages.append(temp2)
-         temp3 = {}
-         temp3["role"] = "user"
-         temp3["content"] = inputs
-         messages.append(temp3)
-         #messages
-         payload = {
-             "model": "gpt-4-1106-preview",
-             "messages": messages, #[{"role": "user", "content": f"{inputs}"}],
-             "temperature" : temperature, #1.0,
-             "top_p": top_p, #1.0,
-             "n" : 1,
-             "stream": True,
-             "presence_penalty":0,
-             "frequency_penalty":0,
-         }
 
-     chat_counter+=1
 
-     history.append(inputs)
-     print(f"payload is - {payload}")
-     # make a POST request to the API endpoint using the requests.post method, passing in stream=True
-     response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-     #response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-     token_counter = 0
-     partial_words = ""
-
-     counter=0
-     for chunk in response.iter_lines():
-         #Skipping first chunk
-         if counter == 0:
-             counter+=1
-             continue
-         # limit the narration to 8 lines
-         if len(history) >= 8:
-             break
-         #counter+=1
-         # check whether each line is non-empty
-         if chunk.decode() :
-             chunk = chunk.decode()
-             # decode each line as response data is in bytes
-             if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
-                 #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
-                 #    break
-                 partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
-                 if token_counter == 0:
-                     history.append(" " + partial_words)
-                 else:
-                     history[-1] = partial_words
-                 chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list
-                 token_counter+=1
-                 yield chat, history, chat_counter # resembles {chatbot: chat, state: history}
-
-
-     return chat, history, chat_counter
 
  def reset_textbox():
      return gr.update(value='')
 
- title = """<h1 align="center">Hyeja Script</h1>"""
- description = """AI automatically generates a script for video production. Simply enter only what is needed, such as a topic keyword or purpose. :
- ```
- User: <utterance>
- Assistant: <utterance>
- User: <utterance>
- Assistant: <utterance>
- ...
- ```
- In this app, you can explore the outputs of a gpt-3.5-turbo LLM.
- """
-
-
- with gr.Blocks(css = """#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
-                 #chatbot {height: 520px; overflow: auto;}""") as demo:
      gr.HTML(title)
-     with gr.Column(elem_id = "col_container"):
          openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
-
-         # Place the output component (chatbot) above the user input component (inputs)
-         chatbot = gr.Chatbot(elem_id='chatbot') # c
-         inputs = gr.Textbox(placeholder="Type here.", label="Enter the topic or sentence for which you want to generate a narration script.") # t
-         state = gr.State([]) # s
          b1 = gr.Button()
-
-         #inputs, top_p, temperature, top_k, repetition_penalty
          with gr.Accordion("Parameters", open=False):
-             top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
-             temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
-             #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
-             #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
              chat_counter = gr.Number(value=0, visible=False, precision=0)
 
          examples = gr.Examples(examples=[
-             ["Product description: The newly launched 'Tori' lip balm is FDA approved, has the best moisturizing power, and ranks No. 1 in purchase index"],
-             ["Branding: The 'Tori' lip balm needs branding that appeals to women in their twenties"],
-             ["Advertisement: For Lunar New Year gifts for parents and relatives, Beopseongpo bori-gulbi 'Beopseong Gulbi' is the best."],
-             ["Information sharing: Taking too much vitamin C is actually harmful to your health."],
-             ["Publicity: 'OpenAI' has opened the custom GPT 'Store' for 'ChatGPT'."],
-             ["Greetings: A forward-looking 2024 New Year's greeting for the customers and employees of the 'Apple corporation'"]
-         ], inputs=[inputs], fn=predict, outputs=[chatbot, state, chat_counter])
-
-
-         inputs.submit( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
-         b1.click( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
-         b1.click(reset_textbox, [], [inputs])
-         inputs.submit(reset_textbox, [], [inputs])
-
-         #gr.Markdown(description)
- demo.queue().launch(debug=True)
-
  import gradio as gr
+ import os
+ import json
  import requests
 
+ API_URL = "https://api.openai.com/v1/chat/completions"
 
  def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]):
 
      narration_prompt = f"Write a professional narration to be used in the video. It must be written in Korean. Do not expose or output any stage directions, instructions, or background explanations; output only pure narration, grouped two lines at a time, within a maximum of 8 lines. Input: '{inputs}'"
 
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {openai_api_key}"
+     }
+
      payload = {
          "model": "gpt-4-1106-preview",
          "messages": [{"role": "system", "content": narration_prompt}],
 
          "stream": True,
          "presence_penalty": 0,
          "frequency_penalty": 0,
+         "max_tokens": 1000
      }
 
+     response = requests.post(API_URL, headers=headers, json=payload, stream=True)
 
+     partial_words = ""
+     token_counter = 0
 
+     try:
+         for chunk in response.iter_lines():
+             if chunk:
+                 try:
+                     chunk_text = chunk.decode()
+                     print("Raw Chunk:", chunk_text)  # print the raw response
+                     chunk_data = json.loads(chunk_text[6:])  # parse the JSON payload
+                     print("Parsed Data:", chunk_data)  # print the parsed data
+                     if 'choices' in chunk_data and 'content' in chunk_data['choices'][0]['delta']:
+                         partial_words += chunk_data['choices'][0]['delta']['content']
+                         if token_counter == 0:
+                             history.append(" " + partial_words)
+                         else:
+                             history[-1] = partial_words
+                         token_counter += 1
+                         chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
+                         yield chat, history, chat_counter
+                 except json.JSONDecodeError as e:
+                     print("JSON parsing error:", e)
+     except Exception as e:
+         print("Error while processing the response:", e)
+
+     return chatbot, history, chat_counter
+
+ # The rest of the code (interface creation, adding examples, etc.) is kept unchanged.
+
+ # Code execution section
+ # e.g. demo.launch(), etc.
 
  def reset_textbox():
      return gr.update(value='')
 
+ title = """<h1 align='center'>Hyeja Script</h1>"""
+ description = "AI automatically generates a script for video production. Simply enter only what is needed, such as a topic keyword or purpose."
+
+ with gr.Blocks(css="#col_container {width: 1000px; margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}") as demo:
      gr.HTML(title)
+     with gr.Column(elem_id="col_container"):
          openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
+         chatbot = gr.Chatbot(elem_id='chatbot')
+         inputs = gr.Textbox(placeholder="Type here.", label="Enter the topic or sentence for which you want to generate a narration script.")
+         state = gr.State([])
          b1 = gr.Button()
+
          with gr.Accordion("Parameters", open=False):
+             top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, label="Top-p (nucleus sampling)")
+             temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, label="Temperature")
              chat_counter = gr.Number(value=0, visible=False, precision=0)
 
          examples = gr.Examples(examples=[
+             ["Product description: The newly launched 'Tori' lip balm is FDA approved, has the best moisturizing power, and ranks No. 1 in purchase index"],
+             ["Branding: The 'Tori' lip balm needs branding that appeals to women in their twenties"],
+             ["Advertisement: For Lunar New Year gifts for parents and relatives, Beopseongpo bori-gulbi 'Beopseong Gulbi' is the best."],
+             ["Information sharing: Taking too much vitamin C is actually harmful to your health."],
+             ["Publicity: 'OpenAI' has opened the custom GPT 'Store' for 'ChatGPT'."],
+             ["Greetings: A forward-looking 2024 New Year's greeting for the customers and employees of the 'Apple corporation'"]
+         ], inputs=[inputs], fn=predict, outputs=[chatbot, state, chat_counter])
+
+         inputs.submit(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter])
+         b1.click(predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter])
+         b1.click(reset_textbox, [], [inputs])
+         inputs.submit(reset_textbox, [], [inputs])
+
+ demo.launch(debug=True)
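A minimal, self-contained sketch (separate from the commit) of the stream parsing the new predict() loop assumes: OpenAI's streaming chat completions arrive as server-sent events of the form `data: {json}` terminated by `data: [DONE]`, which is why the code slices `chunk_text[6:]` to drop the `data: ` prefix and treats JSON decode failures as non-fatal. The sample lines below are illustrative only, not captured output.

```python
import json

def accumulate_stream(lines):
    """Collect the assistant text from OpenAI-style streaming lines."""
    partial = ""
    for line in lines:
        if not line:                     # keep-alive blanks between events
            continue
        payload = line[6:]               # strip the leading "data: " prefix
        if payload.strip() == "[DONE]":  # end-of-stream sentinel, not JSON
            break
        try:
            delta = json.loads(payload)["choices"][0]["delta"]
        except (json.JSONDecodeError, KeyError, IndexError):
            continue                     # mirrors the commit's lenient error handling
        partial += delta.get("content", "")
    return partial

# Illustrative input: two content chunks followed by the terminator.
sample = [
    'data: {"choices": [{"delta": {"content": "Hello"}}]}',
    'data: {"choices": [{"delta": {"content": ", world"}}]}',
    'data: [DONE]',
]
print(accumulate_stream(sample))  # -> Hello, world
```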