Update app.py
app.py CHANGED

@@ -130,16 +130,47 @@ def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
     #response = openai.ChatCompletion.create(model=model, messages=conversation)
 
     # streaming response
-    result_textarea = st.empty()
-    results=[]
-    for responses in openai.Completion.create(model=model, prompt=conversation, stream=True):
+    #result_textarea = st.empty()
+    #results=[]
+    #for responses in openai.Completion.create(model=model, prompt=conversation, stream=True):
     # for responses in openai.ChatCompletion.create(model=model, messages=conversation, stream=True):
         #results.append(str(responses.choices[0]))
-        results.append(responses.choices[0].text)
+        # results.append(responses.choices[0].text)
         #st.markdown(f'*{results}*')
-    result = "".join(results).strip()
-    result = result.replace('\n','')
-    result_textarea.markdown(f'*{result}*')
+    # result = "".join(results).strip()
+    # result = result.replace('\n','')
+    # result_textarea.markdown(f'*{result}*')
+
+
+    # record the time before the request is sent
+    start_time = time.time()
+
+    response = openai.ChatCompletion.create(
+        model='gpt-3.5-turbo',
+        messages=conversation,
+        temperature=0.5,
+        stream=True  # again, we set stream=True
+    )
+
+    # create variables to collect the stream of chunks
+    collected_chunks = []
+    collected_messages = []
+    # iterate through the stream of events
+    for chunk in response:
+        chunk_time = time.time() - start_time  # calculate the time delay of the chunk
+        collected_chunks.append(chunk)  # save the event response
+        chunk_message = chunk['choices'][0]['delta']  # extract the message
+        collected_messages.append(chunk_message)  # save the message
+        print(f"Message received {chunk_time:.2f} seconds after request: {chunk_message}")  # print the delay and text
+
+    # print the time delay and text received
+    print(f"Full response received {chunk_time:.2f} seconds after request")
+    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
+    print(f"Full conversation received: {full_reply_content}")
+
+
+
+
 
     #return response
     #return response['choices'][0]['message']['content']
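The added code streams the reply with openai.ChatCompletion.create(..., stream=True) and accumulates the per-chunk delta objects, but it only prints the partial text to stdout; the st.empty() placeholder that the old, now commented-out path used for live rendering is dropped. Below is a minimal sketch of how the two could be combined so the partial reply renders inside the Streamlit app. It assumes the pre-1.0 openai SDK this file already uses and a conversation message list built as in chat_with_model; the helper name stream_chat_to_placeholder is illustrative and not part of this commit.

    import time

    import openai
    import streamlit as st


    def stream_chat_to_placeholder(conversation, model='gpt-3.5-turbo'):
        # Sketch only: render the partial reply into one placeholder, as the
        # old result_textarea code did, while streaming via ChatCompletion
        # as the new code does.
        result_textarea = st.empty()
        collected_messages = []
        start_time = time.time()

        response = openai.ChatCompletion.create(
            model=model,
            messages=conversation,
            temperature=0.5,
            stream=True,
        )

        for chunk in response:
            delta = chunk['choices'][0]['delta']  # {} or {'content': '...'} per chunk
            collected_messages.append(delta.get('content', ''))
            # redraw the placeholder with everything received so far
            result_textarea.markdown(''.join(collected_messages))

        print(f"Full response received {time.time() - start_time:.2f} seconds after request")
        return ''.join(collected_messages)

Calling full_reply_content = stream_chat_to_placeholder(conversation) in place of the print-only loop would also give chat_with_model a value to return, which the commented-out return lines suggest was the original intent.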