# bmoxi/utils.py
import json
import time
from transformers import AutoTokenizer, AutoModel
from langchain_community.chat_models import ChatOpenAI
import pandas as pd
from config import settings
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema.runnable import RunnablePassthrough
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.agents import AgentExecutor
from tools import moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name
from database_functions import get_chat_bot_name, get_chat_history, get_last_conversion, get_last_session, get_mood_data, use_tools


def get_mood_summary(user_id):
    data = get_mood_data(user_id)
    system_prompt = """You are a descriptive assistant that provides a brief description of the user's mood-tracking data. Clearly describe the reason for their mood. Avoid times and dates in the description.
    Here is the user data: {data}"""
    llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                     openai_api_key=settings.OPENAI_KEY, temperature=0.7)
    return llm.invoke(system_prompt.format(data=data)).content


def deanonymizer(input, anonymizer):
    """Replace anonymized placeholder names in the model output with the original names."""
    input = anonymizer.deanonymize(input)
    mapping = anonymizer.deanonymizer_mapping
    if mapping and "PERSON" in mapping:
        for fake_name in mapping["PERSON"]:
            # Replace each part of the fake name (e.g. first and last name separately)
            # with the original name, to catch partial mentions as well.
            for part in fake_name.split(" "):
                input = input.replace(part, mapping["PERSON"][fake_name])
    return input
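
# Illustrative sketch (assumption, not part of the original file): with
# langchain_experimental's PresidioReversibleAnonymizer, `deanonymizer_mapping`
# is expected to look roughly like
#     {"PERSON": {"Maria Lynch": "Slim Shady"}}
# i.e. fake name -> original name, so deanonymizer() also swaps back partial
# mentions such as "Maria" that anonymizer.deanonymize() alone might miss.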


def get_last_session_summary(last_session_id, second_last_session_id):
    conversation = get_last_conversion(last_session_id, second_last_session_id)
    if conversation:
        system_prompt = """Summarize the whole conversation in at most 2 sentences.
        conversation: {conversation}
        summary:
        """
        llm = ChatOpenAI(model=settings.OPENAI_MODEL,
                         openai_api_key=settings.OPENAI_KEY, temperature=0.7)
        response = llm.invoke(system_prompt.format(conversation=conversation)).content
        return response
    else:
        return ""


def create_agent(user_id):
    previous_session_id = get_last_session(user_id)

    # Expose the full tool set only when the previous session allows tool use.
    if use_tools(previous_session_id["last_session_id"]):
        tools = [moxicast, my_calender, my_journal, my_rewards, my_rituals, my_vibecheck, peptalks, sactury, power_zens, affirmations, horoscope, mentoring, influencer_post, recommand_podcast, set_chatbot_name]
    else:
        tools = [set_chatbot_name]

    functions = [convert_to_openai_function(f) for f in tools]
    model = ChatOpenAI(model_name=settings.OPENAI_MODEL,
                       openai_api_key=settings.OPENAI_KEY, frequency_penalty=1, temperature=settings.TEMPERATURE).bind(functions=functions)
    chat_bot_name = get_chat_bot_name(user_id)

    start = time.time()
    # Mood summary is computed here but currently not injected into the system prompt
    # (the `mood` field is left empty below).
    mood_summary = get_mood_summary(user_id)
    previous_problem_summary = ""
    if previous_session_id['second_last_session_id']:
        previous_problem_summary = get_last_session_summary(previous_session_id['last_session_id'], previous_session_id['second_last_session_id'])
    print("time required for mood summary: ", time.time() - start)

    prompt = ChatPromptTemplate.from_messages([
        ("system", settings.SYSTEM_PROMPT.format(name=chat_bot_name, mood="", previous_summary=previous_problem_summary)),
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad")])

    # Keep the last 5 turns of the stored conversation as chat memory.
    memory = ConversationBufferWindowMemory(memory_key="chat_history", chat_memory=get_chat_history(
        previous_session_id['last_session_id']), return_messages=True, k=5)

    chain = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])) | prompt | model | OpenAIFunctionsAgentOutputParser()
    agent_executor = AgentExecutor(
        agent=chain, tools=tools, memory=memory, verbose=True)
    return agent_executor
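

# Minimal usage sketch (not part of the original module), assuming a user id that
# exists in the database and a configured settings.OPENAI_KEY; "demo_user_id" and
# the sample message are hypothetical.
if __name__ == "__main__":
    executor = create_agent("demo_user_id")
    result = executor.invoke({"input": "Hey, how can you help me today?"})
    print(result["output"])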