from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_groq import ChatGroq
import os
from src.for_streamlit.prompts import ASSISTANT_PROMPT
from langchain.memory import ConversationSummaryBufferMemory
from dotenv import load_dotenv

load_dotenv()

# ChatGroq reads GROQ_API_KEY from the environment; fail fast if .env did not supply it.
if not os.getenv("GROQ_API_KEY"):
    raise RuntimeError("GROQ_API_KEY is not set; add it to your .env file.")

class ConversationHandler:
    """Groq-backed chat handler with running-summary memory."""

    def __init__(self, model_name="llama-3.3-70b-versatile", temperature=0.7):
        self.chat_model = ChatGroq(
            model_name=model_name,
            temperature=temperature,
        )
        # The prompt must expose the variables give_response() supplies:
        # the summarized chat history plus the current user query.
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", ASSISTANT_PROMPT),
            MessagesPlaceholder(variable_name="chat_history"),
            ("human", "{user_query}"),
        ])
        # ConversationSummaryMemory silently ignores max_token_limit; the
        # buffer variant enforces it, summarizing older turns while keeping
        # the most recent ones verbatim.
        self.memory = ConversationSummaryBufferMemory(
            llm=self.chat_model,
            max_token_limit=2000,
            return_messages=True,
            memory_key="chat_history",
        )

    def give_response(self, user_input):
        """Answer user_input, grounded in the summarized chat history."""
        chain = self.prompt | self.chat_model
        memory_variables = self.memory.load_memory_variables({})
        response = chain.invoke(
            {
                "user_query": user_input,
                "chat_history": memory_variables["chat_history"],
            }
        )
        print(response.content)  # handy when running outside Streamlit
        # Persist the turn so it gets folded into the running summary.
        self.memory.save_context(
            {"input": user_input},
            {"output": response.content},
        )
        return response

    def summarize_conversation(self) -> str:
        """Produce a fresh summary of everything currently in memory."""
        memory_variables = self.memory.load_memory_variables({})
        return self.memory.predict_new_summary(
            messages=memory_variables["chat_history"],
            existing_summary="",
        )

    def clear_memory(self):
        """Reset the conversation state."""
        self.memory.clear()
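
# Minimal usage sketch: assumes a valid GROQ_API_KEY in .env and that
# src.for_streamlit.prompts is importable; not part of the Streamlit app itself.
if __name__ == "__main__":
    handler = ConversationHandler()
    handler.give_response("What can you help me with?")
    handler.give_response("Summarize what we've discussed in one line.")
    print(handler.summarize_conversation())
    handler.clear_memory()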