# --- Legacy implementation (LLMChain + ConversationBufferMemory), kept commented out for reference ---
# import os
# import gradio as gr
# from langchain.chat_models import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# from langchain.memory import ConversationBufferMemory

# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""

# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"], 
#     template=template
# )

# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")

# # Define the LLM chain with the ChatOpenAI model and conversation memory
# llm_chain = LLMChain(
#     llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
#     prompt=prompt,
#     verbose=True,
#     memory=memory,
# )

# # Function to get chatbot response (conversation memory supplies the history)
# def get_text_response(user_message):
#     response = llm_chain.predict(user_message=user_message)
#     return response

# # Create a Gradio interface (single text-in/text-out; no explicit state needed)
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")

# if __name__ == "__main__":
#     demo.launch()

# --- Current implementation: direct ChatOpenAI call with explicit message history ---
import os
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage

# Read the OpenAI API key from the environment; never hardcode secrets in source.
if not os.getenv("OPENAI_API_KEY"):
    raise RuntimeError("Set the OPENAI_API_KEY environment variable before launching.")

# Initialize the ChatOpenAI model (the dated gpt-3.5-turbo-0613 snapshot has been
# retired, so use the generic alias)
llm = ChatOpenAI(temperature=1.0, model="gpt-3.5-turbo")

# Function to predict response
def get_text_response(message, history=None):
    # Ensure history is a list
    if history is None:
        history = []
    
    # Convert the Gradio history format to LangChain message format
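    # Gradio's state holds (user, assistant) string pairs,
    # e.g. [("Hi", "Hello! How can I help?")]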
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    
    # Add the new user message to the history
    history_langchain_format.append(HumanMessage(content=message))

    # Get the model's response
    gpt_response = llm(history_langchain_format)

    # Append the new (user message, AI response) pair to the history
    history.append((message, gpt_response.content))

    # Return the response and updated history
    return gpt_response.content, history

# Create a Gradio interface; the "state" components carry the chat history across turns
demo = gr.Interface(
    fn=get_text_response,
    inputs=["text", "state"],
    outputs=["text", "state"],
)
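
# Note: recent Gradio releases also ship gr.ChatInterface, which renders a proper
# chat widget and manages the history itself. A minimal sketch under that
# assumption (`respond` is a hypothetical wrapper, not part of this app):
#
#   def respond(message, history):
#       reply, _ = get_text_response(message, list(history))
#       return reply
#
#   demo = gr.ChatInterface(respond)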

if __name__ == "__main__":
    demo.launch()
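
# To run locally (assumes gradio and langchain are installed):
#   $ export OPENAI_API_KEY="sk-..."   # your real key, kept out of source control
#   $ python app.py                    # assumed filename for this script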