# import os
# import gradio as gr
# from langchain.chat_models import ChatOpenAI
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# from langchain.memory import ConversationBufferMemory

# # Set OpenAI API Key
# OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# # Define the template for the chatbot's response
# template = """You are a helpful assistant to answer all user queries.
# {chat_history}
# User: {user_message}
# Chatbot:"""

# # Define the prompt template
# prompt = PromptTemplate(
#     input_variables=["chat_history", "user_message"], 
#     template=template
# )

# # Initialize conversation memory
# memory = ConversationBufferMemory(memory_key="chat_history")

# # Define the LLM chain with the ChatOpenAI model and conversation memory
# llm_chain = LLMChain(
#     llm=ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo"),  # Use 'model' instead of 'model_name'
#     prompt=prompt,
#     verbose=True,
#     memory=memory,
# )

# # Function to get chatbot response
# def get_text_response(user_message, history):
#     response = llm_chain.predict(user_message=user_message)
#     return response

# # Create a Gradio chat interface
# demo = gr.Interface(fn=get_text_response, inputs="text", outputs="text")

# if __name__ == "__main__":
#     demo.launch()

import os
import gradio as gr
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate

# Set OpenAI API Key (ChatOpenAI reads OPENAI_API_KEY from the environment
# automatically; the explicit lookup doubles as an early sanity check)
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Define the template for the chatbot's response
template = """You are a helpful assistant to answer all user queries.
{chat_history}
User: {user_message}
Chatbot:"""

# Define the prompt template
prompt = PromptTemplate(
    input_variables=["chat_history", "user_message"], 
    template=template
)
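
# For illustration, the template can be filled by hand (a sketch using
# made-up conversation text):
#   prompt.format(chat_history="User: Hi\nChatbot: Hello!", user_message="How are you?")
# returns the full string that gets sent to the model.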

# Conversation history is supplied by Gradio's ChatInterface below, so no
# separate LangChain memory object is needed with the LCEL-style chain

# Define the LLM (language model)
llm = ChatOpenAI(temperature=0.5, model="gpt-3.5-turbo")

# Compose the chain with the LCEL pipe operator (replaces LLMChain);
# `prompt | llm` builds a RunnableSequence that is run with .invoke()
llm_chain = prompt | llm
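
# The pipe above is shorthand; an equivalent explicit form (a sketch,
# assuming langchain_core is available, which langchain installs) would be:
#   from langchain_core.runnables import RunnableSequence
#   llm_chain = RunnableSequence(prompt, llm)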

# Function to get chatbot response
def get_text_response(user_message, history):
    # Flatten Gradio's (user, bot) history pairs into the prompt's chat_history slot
    chat_history = "\n".join(f"User: {u}\nChatbot: {b}" for u, b in history)
    inputs = {"chat_history": chat_history, "user_message": user_message}
    # A RunnableSequence is not called directly; use .invoke()
    response = llm_chain.invoke(inputs)
    # ChatOpenAI returns an AIMessage, so the reply text is in .content
    return response.content

# Create a Gradio chat interface (ChatInterface passes both the new message
# and the running history to the handler)
demo = gr.ChatInterface(fn=get_text_response)

if __name__ == "__main__":
    demo.launch()
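
# Quick sanity check without launching the UI (a sketch; assumes this file
# is saved as app.py and OPENAI_API_KEY is exported in the shell):
#   python -c "from app import llm_chain; print(llm_chain.invoke({'chat_history': '', 'user_message': 'Hello'}).content)"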