seawolf2357 committed on
Commit
1840ee6
·
verified ·
1 Parent(s): 16b1415

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -155
app.py CHANGED
@@ -1,156 +1,30 @@
1
- import gradio as gr
2
- import os
3
- import json
4
  import requests
5
-
6
# Streaming chat-completions endpoint.
API_URL = "https://api.openai.com/v1/chat/completions"  # os.getenv("API_URL") + "/generate_stream"

# SECURITY FIX: the original line hard-coded a live OpenAI secret key AND passed
# it as the *name* of the environment variable, so os.getenv always returned
# None. The leaked key must be revoked. Read the key from a conventionally
# named environment variable instead (still None if unset, as before).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
11
-
12
def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=None, history=None):
    """Stream a narration script from the OpenAI chat-completions API.

    Generator used as a Gradio callback: yields ``(chat, history, chat_counter)``
    after every streamed token so the chat display updates incrementally.

    Parameters:
        inputs: user's topic/sentence for the narration (str).
        top_p, temperature: sampling parameters forwarded to the API.
        openai_api_key: key typed into the UI, sent as a Bearer token.
        chat_counter: number of completed exchanges; 0 means first turn.
        chatbot: list of (user, assistant) pairs shown in the UI, or None.
        history: flat [user, assistant, user, ...] transcript, or None.
    """
    # FIX: the original signature used mutable default arguments ([]), which
    # are shared across calls and silently accumulate state between sessions.
    if chatbot is None:
        chatbot = []
    if history is None:
        history = []

    # Wrap the raw user input in a narration-writing instruction.
    # (Korean prompt text kept byte-for-byte as captured.)
    narration_prompt = f"๋™์˜์ƒ์— ์‚ฌ์šฉํ•  ์ „๋ฌธ์ ์ธ ๋‚˜๋ ˆ์ด์…˜์„ ์ž‘์„ฑํ•˜๋ผ. ๋ฐ˜๋“œ์‹œ ํ•œ๊ธ€๋กœ ์ž‘์„ฑํ• ๊ฒƒ. ์ผ์ฒด์˜ ์ง€๋ฌธ์ด๋‚˜ ์ง€์‹œ, ๋ฐฐ๊ฒฝ ์„ค๋ช… ๋“ฑ์„ ๋…ธ์ถœ ํ•˜๊ฑฐ๋‚˜ ์ถœ๋ ฅํ•˜์ง€ ๋ง๊ณ  ์ˆœ์ˆ˜ํ•œ ๋‚˜๋ ˆ์ด์…˜๋งŒ 2์ค„์”ฉ ๋ฌถ์–ด์„œ ์ตœ๋Œ€ 8์ค„ ์ด๋‚ด๋กœ ์ถœ๋ ฅ๋ ฅ. ์ž…๋ ฅ: '{inputs}'"

    # First-turn payload: the narration instruction is the only message.
    payload = {
        "model": "gpt-4-1106-preview",
        "messages": [{"role": "system", "content": narration_prompt}],
        "temperature": temperature,
        "top_p": top_p,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
        "max_tokens": 1000,  # cap the narration length
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }

    print(f"chat_counter - {chat_counter}")
    if chat_counter != 0:
        # Follow-up turn: rebuild the whole conversation from the UI pairs,
        # then append the new user message.
        messages = []
        for user_msg, assistant_msg in chatbot:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": inputs})
        payload = {
            "model": "gpt-4-1106-preview",
            "messages": messages,
            "temperature": temperature,
            "top_p": top_p,
            "n": 1,
            "stream": True,
            "presence_penalty": 0,
            "frequency_penalty": 0,
        }

    chat_counter += 1
    history.append(inputs)
    print(f"payload is - {payload}")

    # POST with stream=True so the SSE chunks arrive incrementally.
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)

    token_counter = 0
    partial_words = ""
    chat = []  # FIX: original could raise NameError if no token was ever streamed
    counter = 0
    for chunk in response.iter_lines():
        # Skip the first (empty) chunk of the event stream.
        if counter == 0:
            counter += 1
            continue
        # Stop once the transcript reaches 8 entries (narration length cap).
        if len(history) >= 8:
            break
        # Skip keep-alive blank lines.
        if not chunk:
            continue
        chunk = chunk.decode()
        # Lines look like 'data: {...}'. 'data: [DONE]' is exactly 12 chars,
        # so the length guard also skips the end-of-stream sentinel.
        if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
            partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
            if token_counter == 0:
                history.append(" " + partial_words)
            else:
                history[-1] = partial_words
            # Pair up the flat [user, assistant, ...] list into display tuples.
            chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
            token_counter += 1
            yield chat, history, chat_counter  # resembles {chatbot: chat, state: history}

    return chat, history, chat_counter
101
-
102
-
103
def reset_textbox():
    """Return a Gradio update that empties the input textbox."""
    cleared = gr.update(value='')
    return cleared
105
-
106
# NOTE(review): the Korean UI strings below appear mojibake in this capture of
# the file; they are runtime strings, so their bytes are kept exactly as seen.
title = """<h1 align="center">ํ˜œ์ž ์Šคํฌ๋ฆฝํŠธ</h1>"""
description = """์˜์ƒ ์ƒ์„ฑ์„ ์œ„ํ•œ ์Šคํฌ๋ฆฝํŠธ๋ฅผ AI๊ฐ€ ์ž๋™์œผ๋กœ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค. ์ฃผ์ œ ํ‚ค์›Œ๋“œ๋‚˜ ๋ชฉ์  ๋“ฑ ํ•„์š”ํ•œ ๋‚ด์šฉ๋งŒ ๊ฐ„๋‹จํžˆ ์ž…๋ ฅํ•˜์„ธ์š”. :
```
User: <utterance>
Assistant: <utterance>
User: <utterance>
Assistant: <utterance>
...
```
In this app, you can explore the outputs of a gpt-3.5-turbo LLM.
"""

# Build the Gradio UI: API-key box, chat display, input box, sampling controls.
with gr.Blocks(css = """#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
                #chatbot {height: 520px; overflow: auto;}""") as demo:
    gr.HTML(title)
    with gr.Column(elem_id = "col_container"):
        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")

        # The output pane (chatbot) is placed above the user input box.
        chatbot = gr.Chatbot(elem_id='chatbot') # c
        inputs = gr.Textbox(placeholder="์—ฌ๊ธฐ์— ์ž…๋ ฅํ•˜์„ธ์š”.", label="๋‚˜๋ ˆ์ด์…˜ ์Šคํฌ๋ฆฝํŠธ๋ฅผ ์ƒ์„ฑํ•˜๊ณ  ์‹ถ์€ ์ฃผ์ œ์–ด๋‚˜ ๋ฌธ์žฅ์„ ์ž…๋ ฅํ•˜์„ธ์š”.") # t
        state = gr.State([]) # s
        b1 = gr.Button()

        #inputs, top_p, temperature, top_k, repetition_penalty
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
            #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
            #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
            # Hidden turn counter fed back into predict on every call.
            chat_counter = gr.Number(value=0, visible=False, precision=0)

        # Pre-canned example prompts shown under the input box.
        examples = gr.Examples(examples=[
            ["์ƒํ’ˆ ์„ค๋ช…:์ƒˆ๋กœ ์ถœ์‹œ๋œ 'ํ† ๋ฆฌ' ๋ฆฝ๋ฐค์€ FDA ์Šน์ธ, ์ตœ๊ณ ์˜ ๋ณด์Šต๋ ฅ, ๊ตฌ๋งค์ง€์ˆ˜ 1์œ„ "],
            ["๋ธŒ๋žœ๋”ฉ: 'ํ† ๋ฆฌ'๋ฆฝ๋ฐค์€ 20๋Œ€ ์—ฌ์„ฑ์—๊ฒŒ ์–ดํ•„ํ•  ๋ธŒ๋žœ๋”ฉ์ด ํ•„์š”ํ•ด"],
            ["๊ด‘๊ณ : ์„ค๋‚  ๋ถ€๋ชจ๋‹˜๊ณผ ์นœ์ง€ ์„ ๋ฌผ์€ ๋ฒ•์„ฑํฌ ๋ณด๋ฆฌ๊ตด๋น„๊ฐ€ ์ตœ๊ณ ๋ž๋‹ˆ๋‹ค."],
            ["์ •๋ณด ๊ณต์œ : ๋น„ํƒ€๋ฏผC ๊ณผ๋‹ค ๋ณต์šฉ์€ ๊ฑด๊ฐ•์— ์˜คํžˆ๋ ค ํ•ด๋กญ๋‹ค."],
            ["ํ™๋ณด: 'OpenAI'๋Š” '์ฑ—GPT'์˜ ๋งž์ถค GPT '์Šคํ† ์–ด'๋ฅผ ์˜คํ”ˆํ•˜์˜€๋‹ค."],
            ["์ธ์‚ฌ: '์• ํ”Œ ๋ฒ•์ธ'์˜ ๊ณ ๊ฐ๊ณผ ์ž„์ง์›์„ ์œ„ํ•œ ์ง„์ทจ์ ์ธ 2024๋…„ ์‹ ๋…„ ์ธ์‚ฌ"]
        ], inputs=[inputs], fn=predict, outputs=[chatbot, state, chat_counter])

    # Wire both Enter-in-textbox and the button to predict, then clear the box.
    inputs.submit( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
    b1.click( predict, [inputs, top_p, temperature, openai_api_key, chat_counter, chatbot, state], [chatbot, state, chat_counter],)
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])

    #gr.Markdown(description)
# queue() enables the generator-based streaming callbacks.
demo.queue().launch(debug=True)
156
-
 
 
 
 
import gradio as gr
import requests
from bs4 import BeautifulSoup
3
+
4
def get_url_content(url, timeout=10):
    """Fetch the raw HTML of *url*.

    Returns the response body on HTTP 200, otherwise the original Korean
    error message (kept byte-for-byte).

    Fixes over the original:
    - requests.get without a timeout can hang the UI forever; a default
      10-second timeout is added (backward-compatible keyword).
    - network failures (DNS, connection refused, ...) raised an unhandled
      requests.RequestException; they now return the same error message.
    """
    try:
        response = requests.get(url, timeout=timeout)
    except requests.RequestException:
        return "URL์—์„œ ์ฝ˜ํ…์ธ ๋ฅผ ๊ฐ€์ ธ์˜ค๋Š” ๋ฐ ์‹คํŒจํ–ˆ์Šต๋‹ˆ๋‹ค."
    if response.status_code == 200:
        return response.text
    return "URL์—์„œ ์ฝ˜ํ…์ธ ๋ฅผ ๊ฐ€์ ธ์˜ค๋Š” ๋ฐ ์‹คํŒจํ–ˆ์Šต๋‹ˆ๋‹ค."
10
+
11
def parse_html(html_content):
    """Parse an HTML document and return it pretty-printed.

    Extend here to pull out specific elements, e.g. soup.find_all('p').
    """
    return BeautifulSoup(html_content, 'html.parser').prettify()
16
+
17
# Gradio callback
def gradio_fetch_and_parse(url):
    """Download *url* and return its prettified HTML for display."""
    raw_html = get_url_content(url)
    return parse_html(raw_html)
22
+
23
# Assemble the Gradio app: one URL textbox in, parsed page content out.
iface = gr.Interface(
    fn=gradio_fetch_and_parse,
    inputs=gr.Textbox(label="URL์„ ์ž…๋ ฅํ•˜์„ธ์š”"),
    outputs=gr.Textbox(label="์›นํŽ˜์ด์ง€ ์ฝ˜ํ…์ธ "),
)

iface.launch()