Update app.py
app.py
CHANGED
@@ -42,80 +42,47 @@
 # if __name__ == "__main__":
 #     demo.launch()
 
-
-# import gradio as gr
-# from langchain.chat_models import ChatOpenAI
-# from langchain.schema import AIMessage, HumanMessage
-
-# # Set OpenAI API Key
-# os.environ["OPENAI_API_KEY"] = "sk-3_mJiR5z9Q3XN-D33cgrAIYGffmMvHfu5Je1U0CW1ZT3BlbkFJA2vfSvDqZAVUyHo2JIcU91XPiAq424OSS8ci29tWMA" # Replace with your key
-
-# # Initialize the ChatOpenAI model
-# llm = ChatOpenAI(temperature=1.0, model="gpt-3.5-turbo-0613")
-
-# # Function to predict response
-# def get_text_response(message, history=None):
-#     # Ensure history is a list
-#     if history is None:
-#         history = []
-
-#     # Convert the Gradio history format to LangChain message format
-#     history_langchain_format = []
-#     for human, ai in history:
-#         history_langchain_format.append(HumanMessage(content=human))
-#         history_langchain_format.append(AIMessage(content=ai))
-
-#     # Add the new user message to the history
-#     history_langchain_format.append(HumanMessage(content=message))
-
-#     # Get the model's response
-#     gpt_response = llm(history_langchain_format)
-
-#     # Append AI response to history
-#     history.append((message, gpt_response.content))
-
-#     # Return the response and updated history
-#     return gpt_response.content, history
-
-# # Create a Gradio chat interface
-# demo = gr.Interface(
-#     fn=get_text_response,
-#     inputs=["text", "state"],
-#     outputs=["text", "state"]
-# )
-
-# if __name__ == "__main__":
-#     demo.launch()
-
-import time
+import os
 import gradio as gr
 from langchain.chat_models import ChatOpenAI
 from langchain.schema import AIMessage, HumanMessage
-import openai
 
-
+# Set OpenAI API Key
+os.environ["OPENAI_API_KEY"] = "sk-3_mJiR5z9Q3XN-D33cgrAIYGffmMvHfu5Je1U0CW1ZT3BlbkFJA2vfSvDqZAVUyHo2JIcU91XPiAq424OSS8ci29tWMA" # Replace with your key
 
-# Initialize ChatOpenAI
-llm = ChatOpenAI(temperature=1.0, model=
+# Initialize the ChatOpenAI model
+llm = ChatOpenAI(temperature=1.0, model="gpt-3.5-turbo-0613")
 
-
-
+# Function to predict response
+def get_text_response(message, history=None):
+    # Ensure history is a list
+    if history is None:
+        history = []
+
+    # Convert the Gradio history format to LangChain message format
     history_langchain_format = []
     for human, ai in history:
         history_langchain_format.append(HumanMessage(content=human))
         history_langchain_format.append(AIMessage(content=ai))
 
-    # Add
+    # Add the new user message to the history
     history_langchain_format.append(HumanMessage(content=message))
-
-    # Get
+
+    # Get the model's response
     gpt_response = llm(history_langchain_format)
-
-    # Return response
-    return gpt_response.content
 
-    #
-
+    # Append AI response to history
+    history.append((message, gpt_response.content))
+
+    # Return the response and updated history
+    return gpt_response.content, history
+
+# Create a Gradio chat interface
+demo = gr.ChatInterface(
+    fn=get_text_response,
+    inputs=["text", "state"],
+    outputs=["text", "state"]
+)
 
 if __name__ == "__main__":
     demo.launch()
@@ -129,3 +96,5 @@ if __name__ == "__main__":
 
 
 
+
+
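Editor's note on the committed version: the added demo = gr.ChatInterface(...) call passes inputs= and outputs= keyword arguments, but those belong to gr.Interface; gr.ChatInterface takes only the chat callback (plus optional UI settings), manages the conversation state itself, and expects the callback to return just the reply string rather than a (response, history) tuple. The hardcoded OpenAI key is also publicly visible in the Space's repo and should be revoked and supplied as a Space secret instead. A minimal sketch under those assumptions, keeping the diff's LangChain 0.0.x-style imports and assuming a Gradio version whose ChatInterface history is the (user, assistant) tuple format:

import os

import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

# Assumes OPENAI_API_KEY is provided as a Space secret / environment
# variable rather than hardcoded in app.py, where it would be exposed.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError("Set the OPENAI_API_KEY environment variable")

llm = ChatOpenAI(temperature=1.0, model="gpt-3.5-turbo-0613")

def get_text_response(message, history):
    # ChatInterface passes history as [(user, assistant), ...] tuples;
    # convert them to LangChain message objects.
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))

    # Get the model's response (LangChain 0.0.x call style, as in the diff)
    gpt_response = llm(history_langchain_format)

    # Return only the reply text; ChatInterface appends it to its own history.
    return gpt_response.content

# ChatInterface wires up the textbox and chatbot state on its own;
# it does not accept the inputs=/outputs= arguments used by gr.Interface.
demo = gr.ChatInterface(fn=get_text_response)

if __name__ == "__main__":
    demo.launch()

With this shape, ChatInterface records each (message, reply) pair itself, so the manual history.append bookkeeping added in the commit is unnecessary.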