Spaces:
Build error
Commit: f7c2b64
Parent(s): c9a8866
Integrate GPT4
Files changed:
- app.py +146 -2
- requirements.txt +1 -0
app.py
CHANGED
@@ -1,4 +1,148 @@
Old (lines 1–4): line 1 is "import streamlit as st", line 2 is blank; lines 3–4 are removed (their content is not shown in this view).
New (lines 1–148):

import streamlit as st
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from typing import Any, List, Mapping, Optional
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
import langchain
import asyncio
from playwright.async_api import async_playwright
import time
import regex
import html2text
import os


# Agent flavour whose output format _call post-processes below.
CurrentAgent = 'Structured Zero Short Agent'


class GPTRemote(LLM):
    """Custom LangChain LLM that relays prompts to a remote GPT-4 chat page via Playwright."""
    n: int

    @property
    def _llm_type(self) -> str:
        return "custom"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        print("prompt:", prompt)

        output = asyncio.run(start_playwright(prompt))

        if output is None:
            output = "No Feedback"
        print("-" * 20)
        print('Raw: \n', output)

        keywords = ['Action:', 'Action Input:', 'Observation:', 'Thought:', 'Final Answer:']
        # Drop anything the model wrote past the first "Observation:" marker.
        if 'Action:' in output and 'Observation:' in output:
            output = output.split('Observation:')[0]

        global CurrentAgent

        if CurrentAgent == 'Structured Zero Short Agent':
            try:
                # The structured agent expects a complete JSON action blob:
                # patch a missing closing brace, then wrap the blob in a fenced block.
                if output.strip()[-1] == '}' and 'Action:' in output:
                    print("valid command")
                elif 'Action:' in output:
                    output = output + '}'
                    print("corrected command")

                pattern = r'\{((?:[^{}]|(?R))*)\}'  # recursive regex: outermost {...} group
                temp = regex.search(pattern, output)
                rrr = temp.group()
                output = output.replace(rrr, '```' + '\n' + rrr + '\n' + '```')

            except Exception as e:
                print("model internal error:", e)

        print("-" * 20)
        print("Treated output: \n", output)
        return output

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {"n": self.n}


def treat_output(text):
    # Put each ReAct keyword on its own line (currently unused by the app).
    keywords = ['Action:', 'Action Input:', 'Observation:', 'Thought:', 'Final Answer:']
    for item in keywords:
        if item in text:
            text = text.replace(item, '\n' + item)
    print("treat output: ", text)
    return text


GPTfake = GPTRemote(n=0)


async def start_playwright(question: str):
    start_t = time.time()
    pw = await async_playwright().start()
    browser = await pw.chromium.launch(headless=True)
    end_t = time.time()
    print("Init Browser Done:", end_t - start_t)
    start_t = end_t
    page = await browser.new_page()

    # Note: all Playwright page methods are async (use the "await" keyword).
    await page.goto(os.environ["Endpoint_GPT4"])
    # print("Title of Web: ", await page.title())
    end_t = time.time()
    print("New Page Done:", end_t - start_t)
    start_t = end_t
    await page.wait_for_timeout(200)

    # Type the question into the chat textarea and submit it.
    await page.locator("//textarea").fill(question)
    await page.wait_for_timeout(200)

    await page.locator("//textarea").press("Enter")
    await page.wait_for_timeout(200)

    # Poll the last assistant message until it stops changing (streaming cursor gone).
    output_history = "NOTHING"
    for i in range(100):
        output_text_old = await page.locator("//div[@aria-label='Chat message from assistant']").last.inner_text()
        html_content = await page.locator("//div[@aria-label='Chat message from assistant']//div[@class='stMarkdown']").last.inner_html()
        markdown_converter = html2text.HTML2Text()
        output_text = markdown_converter.handle(html_content)
        print("output_text... :")

        if output_text == output_history and '▌' not in output_text and output_text != "":
            end_t = time.time()
            print("Output Done:", end_t - start_t)
            await browser.close()
            return output_text
        else:
            await page.wait_for_timeout(500)
            output_history = output_text

    print("-------- Final Answer-----------\n", output_text)

    await browser.close()


# GPTfake("hi")


st.title("STLA-BABY")

msgs = StreamlitChatMessageHistory()

if len(msgs.messages) == 0 or st.sidebar.button("Reset Chat History"):
    msgs.clear()
    msgs.add_ai_message("How can I help you ?")
    st.session_state.steps = {}

avatars = {"human": "user", "ai": "assistant"}

for idx, msg in enumerate(msgs.messages):
    with st.chat_message(avatars[msg.type]):
        st.write(msg.content)

if prompt := st.chat_input(placeholder="Input Your Request"):
    st.chat_message("user").write(prompt)

    with st.chat_message("assistant"):
        response = GPTfake(prompt)
        st.write(response)
        print(msgs.messages)
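The commit wires GPTfake straight into the chat UI, but the CurrentAgent setting and the output clean-up in _call suggest the LLM is meant to feed a LangChain structured agent. A minimal sketch of that wiring, assuming the definitions from app.py above are in scope (the Echo tool is a hypothetical placeholder, not part of this commit):

from langchain.agents import AgentType, Tool, initialize_agent

def echo(query: str) -> str:
    # Hypothetical placeholder tool so the agent has something to call.
    return query

tools = [Tool(name="Echo", func=echo, description="Returns the input unchanged.")]

agent = initialize_agent(
    tools,
    GPTfake,  # the custom Playwright-backed LLM defined above
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True,
)

# agent.run("Say hello")  # each agent step would be relayed through the remote GPT-4 page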
requirements.txt
CHANGED
@@ -30,3 +30,4 @@ pandas
 regex
 graphviz
 streamlit
+streamlit_chat
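To sanity-check the Playwright relay outside Streamlit, a quick smoke test could mirror the commented-out GPTfake("hi") call in app.py; the endpoint URL below is a placeholder, and Playwright's Chromium binary must be installed separately (playwright install chromium):

import os

# Placeholder endpoint; point this at the actual GPT-4 chat page the Space uses.
os.environ.setdefault("Endpoint_GPT4", "https://example.com/gpt4-chat")

# Assumes GPTfake from app.py above is in scope.
print(GPTfake("hi"))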