code (string, 141 to 97.3k chars) | apis (sequence, 1 to 24 items) | extract_api (string, 113 to 214k chars) |
---|---|---|
import langchain as lc
import openai as ai
import datasets as ds
import tiktoken as tk
import os
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
import os
# Load environment variables from .env file
load_dotenv()
# Get the OpenAI API key from the environment variable
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key is None:
raise ValueError("No OpenAI API key found. Please set it in the .env file.")
# Initialize the ChatOpenAI with the API key
chat = ChatOpenAI(openai_api_key=openai_api_key, model="gpt-3.5-turbo")
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
messages = [
SystemMessage(content="You are a helpful assistant."),
HumanMessage(content="Hi AI, how are you today?."),
AIMessage(content="I am great, thank you. How can I help you?"),
HumanMessage(content="I am looking for a restaurant in the center of Berlin."),
]
| [
"langchain.schema.SystemMessage",
"langchain.schema.AIMessage",
"langchain.schema.HumanMessage",
"langchain_openai.ChatOpenAI"
] | [((224, 237), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (235, 237), False, 'from dotenv import load_dotenv\n'), ((311, 338), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (320, 338), False, 'import os\n'), ((501, 563), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'open_api_key': 'openai_api_key', 'model': '"""gpt-3.5-turbo"""'}), "(open_api_key=openai_api_key, model='gpt-3.5-turbo')\n", (511, 563), False, 'from langchain_openai import ChatOpenAI\n'), ((667, 720), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a helpful assistant."""'}), "(content='You are a helpful assistant.')\n", (680, 720), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((726, 776), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Hi AI, how are you today?."""'}), "(content='Hi AI, how are you today?.')\n", (738, 776), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((782, 845), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '"""I am great, thank you. How can I help you?"""'}), "(content='I am great, thank you. How can I help you?')\n", (791, 845), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((851, 929), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""I am looking for a restaurant in the center of Berlin."""'}), "(content='I am looking for a restaurant in the center of Berlin.')\n", (863, 929), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n')] |
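The first sample above builds the chat model and the message list but never sends them to the model. A minimal sketch of completing the exchange, assuming the same `chat` and `messages` objects from that sample:

```python
# Send the accumulated conversation to the model; ChatOpenAI.invoke() accepts
# a list of messages and returns an AIMessage that can be appended to keep context.
response = chat.invoke(messages)
print(response.content)
messages.append(response)
```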
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.utils import get_from_env
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
api_url = api_url or get_from_env("api_url", "LANGCHAIN_HUB_API_URL")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_HUB_API_KEY", default="")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_API_KEY")
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
) -> str:
"""
Pushes an object to the hub and returns the URL.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
resp = client.push(
repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash
)
commit_hash: str = resp["commit"]["commit_hash"]
return commit_hash
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
| [
"langchainhub.Client",
"langchain.utils.get_from_env",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((862, 894), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (868, 894), False, 'from langchainhub import Client\n'), ((1234, 1247), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1239, 1247), False, 'from langchain.load.dump import dumps\n'), ((1740, 1751), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (1745, 1751), False, 'from langchain.load.load import loads\n'), ((646, 694), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_url"""', '"""LANGCHAIN_HUB_API_URL"""'], {}), "('api_url', 'LANGCHAIN_HUB_API_URL')\n", (658, 694), False, 'from langchain.utils import get_from_env\n'), ((720, 780), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_HUB_API_KEY"""'], {'default': '""""""'}), "('api_key', 'LANGCHAIN_HUB_API_KEY', default='')\n", (732, 780), False, 'from langchain.utils import get_from_env\n'), ((806, 850), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_API_KEY"""'], {}), "('api_key', 'LANGCHAIN_API_KEY')\n", (818, 850), False, 'from langchain.utils import get_from_env\n')] |
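A short, hedged usage sketch for the hub helpers defined above; the repository handle and the prompt are hypothetical, and a LangChain Hub API key is assumed to be configured in the environment:

```python
# Hypothetical round trip: push a prompt template to the hub, then pull it back.
from langchain.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Summarize the following text: {text}")
commit_hash = push("my-handle/summarize-prompt", prompt)  # hypothetical repo name
restored = pull("my-handle/summarize-prompt")
print(commit_hash, type(restored))
```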
from datetime import timedelta
import os
import subprocess
import whisper
import tempfile
import argparse
import langchain
from langchain.chat_models import ChatOpenAI, ChatGooglePalm
from langchain.schema import HumanMessage, SystemMessage, AIMessage
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.callbacks import get_openai_callback
from tqdm import tqdm
def get_translate_chain(from_lang, to_lang):
template=f"You are a helpful assistant that translates {from_lang} to {to_lang}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="Please translate \"{text}\""+f" from {from_lang} to {to_lang}. Give me the translated {to_lang} directly without saying anything else, do not use \"."
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# get a chat completion from the formatted messages
chat = ChatOpenAI()
chain = LLMChain(llm=chat, prompt=chat_prompt, verbose=True)
return chain
def gen_srt(video_path, model_name="medium", from_language="English", to_language="Chinese", embed=False, translate=True):
with tempfile.TemporaryDirectory() as temp_dir:
# 1. use ffmpeg to extract audio from video and save it to Temp folder
# Path to the temporary audio file
temp_audio_path = os.path.join(temp_dir, "extracted_audio.wav")
# Use ffmpeg to extract audio from video
print("Extracting audio from video...")
command = f"ffmpeg -i {video_path} -vn -ar 44100 -ac 2 -b:a 192k {temp_audio_path}"
# Execute the command
subprocess.call(command, shell=True)
model = whisper.load_model(model_name)
transcribe = model.transcribe(audio=temp_audio_path, language=from_language)
segments = transcribe['segments']
# 2. Use whisper to transcribe audio and save segments to srt file
if translate:
with get_openai_callback() as cb:
chain = get_translate_chain(from_language, to_language)
for segment in tqdm(segments):
segment['text'] = chain(segment['text'])['text']
print(cb)
# 3. Generate the SRT file
srtFilename = video_path.split(".")[0] + ".srt"
# overwrite the file if it already exists
if os.path.exists(srtFilename):
os.remove(srtFilename)
for segment in segments:
startTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000'
endTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000'
text = segment['text']
segmentId = segment['id']+1
segment = f"{segmentId}\n{startTime} --> {endTime}\n{text[1:] if text[0] == ' ' else text}\n\n"
with open(srtFilename, 'a', encoding='utf-8') as srtFile:
srtFile.write(segment)
# 4. Use FFMPEG to embed srt file into video
if not embed:
return
output_filename = video_path.split(".")[0] + "_subtitled.mp4"
if os.path.exists(output_filename):
os.remove(output_filename)
embed_command = f"ffmpeg -i {video_path} -vf subtitles={srtFilename} {output_filename}"
subprocess.call(embed_command, shell=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some arguments')
# Add the arguments
parser.add_argument('-i', type=str, required=True, dest='input_file',
help='Input file name')
parser.add_argument('-m', type=str, default='medium', dest='model_name',
help='Model type, default is "medium"')
parser.add_argument('-f', type=str, default='English', dest='from_lang',
help='Translate from language, default is "English"')
parser.add_argument('-t', type=str, default='Chinese', dest='to_lang',
help='Translate to language, default is "Chinese"')
parser.add_argument('--embed', dest='embed', action='store_true',
help='Whether to Embed subtitles, default is False')
parser.add_argument('--translate', dest='translate', action='store_true',
help='Whether to Translate, default is False')
args = parser.parse_args()
gen_srt(args.input_file, model_name=args.model_name, embed=args.embed, translate=args.translate, from_language=args.from_lang, to_language=args.to_lang)
| [
"langchain.chains.LLMChain",
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.callbacks.get_openai_callback",
"langchain.prompts.HumanMessagePromptTemplate.from_template"
] | [((696, 747), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (737, 747), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((946, 1002), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (986, 1002), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1021, 1100), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (1053, 1100), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1169, 1181), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1179, 1181), False, 'from langchain.chat_models import ChatOpenAI, ChatGooglePalm\n'), ((1194, 1246), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'chat', 'prompt': 'chat_prompt', 'verbose': '(True)'}), '(llm=chat, prompt=chat_prompt, verbose=True)\n', (1202, 1246), False, 'from langchain.chains import LLMChain\n'), ((3618, 3679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some arguments"""'}), "(description='Process some arguments')\n", (3641, 3679), False, 'import argparse\n'), ((1398, 1427), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1425, 1427), False, 'import tempfile\n'), ((1589, 1634), 'os.path.join', 'os.path.join', (['temp_dir', '"""extracted_audio.wav"""'], {}), "(temp_dir, 'extracted_audio.wav')\n", (1601, 1634), False, 'import os\n'), ((1880, 1916), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1895, 1916), False, 'import subprocess\n'), ((1934, 1964), 'whisper.load_model', 'whisper.load_model', (['model_name'], {}), '(model_name)\n', (1952, 1964), False, 'import whisper\n'), ((2619, 2646), 'os.path.exists', 'os.path.exists', (['srtFilename'], {}), '(srtFilename)\n', (2633, 2646), False, 'import os\n'), ((3357, 3388), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (3371, 3388), False, 'import os\n'), ((3533, 3575), 'subprocess.call', 'subprocess.call', (['embed_command'], {'shell': '(True)'}), '(embed_command, shell=True)\n', (3548, 3575), False, 'import subprocess\n'), ((2660, 2682), 'os.remove', 'os.remove', (['srtFilename'], {}), '(srtFilename)\n', (2669, 2682), False, 'import os\n'), ((3402, 3428), 'os.remove', 'os.remove', (['output_filename'], {}), '(output_filename)\n', (3411, 3428), False, 'import os\n'), ((2215, 2236), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2234, 2236), False, 'from langchain.callbacks import get_openai_callback\n'), ((2347, 2361), 'tqdm.tqdm', 'tqdm', (['segments'], {}), '(segments)\n', (2351, 2361), False, 'from tqdm import tqdm\n')] |
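A small sketch of driving the subtitle script above from Python rather than the command line; the video path is hypothetical, and an OpenAI API key is assumed to be available for the translation chain:

```python
# Translate one line with the chain builder defined above, then run the full
# pipeline on a local file (hypothetical path).
chain = get_translate_chain("English", "Chinese")
print(chain("Hello, world")["text"])  # LLMChain returns a dict with a "text" output key

gen_srt("example.mp4", model_name="small", from_language="English",
        to_language="Chinese", embed=False, translate=True)
```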
from langchain import OpenAI, LLMChain
from langchain.callbacks import StdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from src.agents.chat_chain import ChatChain
from src.agents.graphdb_traversal_chain import GraphDBTraversalChain, mem_query_template, mem_system_message
from src.memory.triple_modal_memory import TripleModalMemory
import os
from dotenv import load_dotenv
# Set up the cache
import langchain
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
# initialize the memory
load_dotenv()
uri = os.getenv("NEO4J_URI")
user = os.getenv("NEO4J_USER")
password = os.getenv("NEO4J_PASSWORD")
mem = TripleModalMemory(uri, user, password)
# Create memory from docks or load from file if it exists
ingested = os.path.exists('../data/triple_modal_memory.faiss')
if not ingested:
knowledge_path = r'C:\Users\colli\Documents\AIPapers'
mem.ingest_docs(knowledge_path)
mem.save()
print("Memory initialized and saved.")
else:
mem.load()
print("Memory loaded.")
handler = StdOutCallbackHandler()
llm = ChatOpenAI(
model_name="gpt-4", #"gpt-3.5-turbo"
temperature=0,
verbose=True
)
chain = ChatChain(llm=llm, prompt=mem_query_template, callbacks=[handler], system_message=mem_system_message)
knowledge_base_query_agent = GraphDBTraversalChain(llm_chain=chain, graph_vector_store=mem.vector_store)
# Example Research questions:
# What are different methods of providing language models with additional context to better answer questions?
# How can semantic search be used in conjunction with large language models in order to better answer questions?
# What are some techniques for achieving better general intelligence in language models?
def main_loop():
try:
while True:
question = input("Enter a question: ")
print(knowledge_base_query_agent.run(question))
except KeyboardInterrupt:
print("Shutdown: Saving...")
mem.save()
print("Shutdown: Complete")
else:
print("Completed all tasks.")
if __name__ == '__main__':
main_loop() | [
"langchain.callbacks.StdOutCallbackHandler",
"langchain.cache.SQLiteCache",
"langchain.chat_models.ChatOpenAI"
] | [((495, 537), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (506, 537), False, 'from langchain.cache import SQLiteCache\n'), ((563, 576), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (574, 576), False, 'from dotenv import load_dotenv\n'), ((583, 605), 'os.getenv', 'os.getenv', (['"""NEO4J_URI"""'], {}), "('NEO4J_URI')\n", (592, 605), False, 'import os\n'), ((613, 636), 'os.getenv', 'os.getenv', (['"""NEO4J_USER"""'], {}), "('NEO4J_USER')\n", (622, 636), False, 'import os\n'), ((648, 675), 'os.getenv', 'os.getenv', (['"""NEO4J_PASSWORD"""'], {}), "('NEO4J_PASSWORD')\n", (657, 675), False, 'import os\n'), ((683, 721), 'src.memory.triple_modal_memory.TripleModalMemory', 'TripleModalMemory', (['uri', 'user', 'password'], {}), '(uri, user, password)\n', (700, 721), False, 'from src.memory.triple_modal_memory import TripleModalMemory\n'), ((792, 843), 'os.path.exists', 'os.path.exists', (['"""../data/triple_modal_memory.faiss"""'], {}), "('../data/triple_modal_memory.faiss')\n", (806, 843), False, 'import os\n'), ((1074, 1097), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (1095, 1097), False, 'from langchain.callbacks import StdOutCallbackHandler\n'), ((1105, 1164), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)', 'verbose': '(True)'}), "(model_name='gpt-4', temperature=0, verbose=True)\n", (1115, 1164), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1204, 1309), 'src.agents.chat_chain.ChatChain', 'ChatChain', ([], {'llm': 'llm', 'prompt': 'mem_query_template', 'callbacks': '[handler]', 'system_message': 'mem_system_message'}), '(llm=llm, prompt=mem_query_template, callbacks=[handler],\n system_message=mem_system_message)\n', (1213, 1309), False, 'from src.agents.chat_chain import ChatChain\n'), ((1335, 1410), 'src.agents.graphdb_traversal_chain.GraphDBTraversalChain', 'GraphDBTraversalChain', ([], {'llm_chain': 'chain', 'graph_vector_store': 'mem.vector_store'}), '(llm_chain=chain, graph_vector_store=mem.vector_store)\n', (1356, 1410), False, 'from src.agents.graphdb_traversal_chain import GraphDBTraversalChain, mem_query_template, mem_system_message\n')] |
from __future__ import annotations
import logging
from functools import lru_cache
from typing import List, Optional
import langchain
from langchain.agents import AgentExecutor, Tool, initialize_agent
from langchain.agents.agent_types import AgentType
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import BaseMessage
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
from expert_gpts.llms.agent import HUMAN_SUFFIX, SYSTEM_PREFIX, ConvoOutputCustomParser
from shared.llm_manager_base import BaseLLMManager, Cost
from shared.llms.openai import GPT_3_5_TURBO, GPT_4, TEXT_ADA_EMBEDDING
from shared.llms.system_prompts import PLANNER_SYSTEM_PROMPT
langchain.debug = True
logger = logging.getLogger(__name__)
COSTS = {
GPT_3_5_TURBO: Cost(prompt=0.0015, completion=0.002),
GPT_4: Cost(prompt=0.03, completion=0.05),
TEXT_ADA_EMBEDDING: Cost(prompt=0.0001, completion=0.0001),
}
class OpenAIApiManager(BaseLLMManager):
_agents = {}
def __init__(self):
super().__init__(COSTS)
def get_agent_executor(
self,
llm,
agent_type: AgentType = AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
memory: Optional[BaseChatMemory] = None,
tools: Optional[List[Tool]] = None,
system_message: Optional[str] = SYSTEM_PREFIX,
human_message: Optional[str] = HUMAN_SUFFIX,
) -> AgentExecutor:
agent_kwargs = {
"output_parser": ConvoOutputCustomParser(),
}
if system_message:
agent_kwargs["system_message"] = system_message
if human_message:
agent_kwargs["human_message"] = human_message
return initialize_agent(
tools=tools,
llm=llm,
agent=agent_type,
memory=memory,
agent_kwargs=agent_kwargs,
)
def create_chat_completion(
self,
messages: List[BaseMessage], # type: ignore
model: str | None = GPT_3_5_TURBO,
temperature: float = 0,
max_tokens: int | None = None,
deployment_id=None,
openai_api_key=None,
) -> str:
llm = self.get_llm(max_tokens, model, temperature)
with get_openai_callback() as cb:
response = llm(messages, callbacks=[self.callbacks_handler])
self.update_cost(cb)
return response.content
def create_chat_completion_with_agent(
self,
user_input: str, # type: ignore
agent_type: AgentType = AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
model: str | None = GPT_3_5_TURBO,
agent_key: str = "default",
temperature: float = 0,
max_tokens: int | None = None,
memory: Optional[BaseChatMemory] = None,
tools: Optional[List[Tool]] = None,
) -> str:
llm = self.get_llm(max_tokens, model, temperature)
if agent_key not in self._agents:
self._agents[agent_key] = self.get_agent_executor(
llm, agent_type, memory, tools
)
agent = self._agents[agent_key]
with get_openai_callback() as cb:
response = agent.run(input=user_input, callbacks=[self.callbacks_handler])
self.update_cost(cb)
return response
def execute_plan(
self,
user_input: str, # type: ignore
model: str | None = GPT_3_5_TURBO,
agent_key: str = "default_plan",
temperature: float = 0,
max_tokens: int | None = None,
tools: Optional[List[Tool]] = None,
) -> str:
llm = self.get_llm(max_tokens, model, temperature)
if agent_key not in self._agents:
planner = load_chat_planner(llm, system_prompt=PLANNER_SYSTEM_PROMPT)
executor = load_agent_executor(llm, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
self._agents[agent_key] = agent
agent = self._agents[agent_key]
with get_openai_callback() as cb:
response = agent.run(input=user_input, callbacks=[self.callbacks_handler])
self.update_cost(cb)
return response
@lru_cache
def get_llm(
self, max_tokens, model, temperature, as_predictor: bool = False
) -> BaseChatModel:
llm = ChatOpenAI(
model_name=model,
temperature=temperature,
max_tokens=max_tokens,
)
return llm
| [
"langchain_experimental.plan_and_execute.load_chat_planner",
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.callbacks.get_openai_callback",
"langchain_experimental.plan_and_execute.load_agent_executor",
"langchain_experimental.plan_and_execute.PlanAndExecute"
] | [((946, 973), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (963, 973), False, 'import logging\n'), ((1004, 1041), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.0015)', 'completion': '(0.002)'}), '(prompt=0.0015, completion=0.002)\n', (1008, 1041), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1054, 1088), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.03)', 'completion': '(0.05)'}), '(prompt=0.03, completion=0.05)\n', (1058, 1088), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1114, 1152), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.0001)', 'completion': '(0.0001)'}), '(prompt=0.0001, completion=0.0001)\n', (1118, 1152), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1906, 2008), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'agent_type', 'memory': 'memory', 'agent_kwargs': 'agent_kwargs'}), '(tools=tools, llm=llm, agent=agent_type, memory=memory,\n agent_kwargs=agent_kwargs)\n', (1922, 2008), False, 'from langchain.agents import AgentExecutor, Tool, initialize_agent\n'), ((4514, 4590), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model', 'temperature': 'temperature', 'max_tokens': 'max_tokens'}), '(model_name=model, temperature=temperature, max_tokens=max_tokens)\n', (4524, 4590), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1683, 1708), 'expert_gpts.llms.agent.ConvoOutputCustomParser', 'ConvoOutputCustomParser', ([], {}), '()\n', (1706, 1708), False, 'from expert_gpts.llms.agent import HUMAN_SUFFIX, SYSTEM_PREFIX, ConvoOutputCustomParser\n'), ((2434, 2455), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2453, 2455), False, 'from langchain.callbacks import get_openai_callback\n'), ((3307, 3328), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (3326, 3328), False, 'from langchain.callbacks import get_openai_callback\n'), ((3890, 3949), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['llm'], {'system_prompt': 'PLANNER_SYSTEM_PROMPT'}), '(llm, system_prompt=PLANNER_SYSTEM_PROMPT)\n', (3907, 3949), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((3973, 4018), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['llm', 'tools'], {'verbose': '(True)'}), '(llm, tools, verbose=True)\n', (3992, 4018), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((4039, 4103), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)'}), '(planner=planner, executor=executor, verbose=True)\n', (4053, 4103), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((4201, 4222), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (4220, 4222), False, 'from langchain.callbacks import get_openai_callback\n')] |
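A brief, hedged usage sketch for the manager above; it assumes the surrounding `shared` and `expert_gpts` packages are importable and an OpenAI key is configured:

```python
# One-off completion through the manager defined above.
from langchain.schema.messages import HumanMessage, SystemMessage

manager = OpenAIApiManager()
reply = manager.create_chat_completion(
    [
        SystemMessage(content="You are a terse assistant."),
        HumanMessage(content="Name one use of a vector store."),
    ],
    model=GPT_3_5_TURBO,
    temperature=0,
)
print(reply)
```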
import os
import utils
import traceback
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory,ConversationBufferMemory,ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from embeddings import EmbeddingsManager
from flask import Flask, send_from_directory
import json
import time
import threading
import secrets
import string
import hashlib
from flask import request
from langchain.cache import InMemoryCache,SQLiteCache
import re
import requests
from waitress import serve
from translator import Translator
import sys
from query.discoursequery import DiscourseQuery
from query.embeddingsquery import EmbeddingsQuery
from Summary import Summary
import uuid
from langchain.llms import NLPCloud
from langchain.llms import AI21
from langchain.llms import Cohere
from SmartCache import SmartCache
CONFIG=None
QUERIERS=[]
args=sys.argv
confiFile=args[1] if len(args)>1 else "config.json"
print("Use config file", confiFile)
with open(confiFile, "r") as f:
CONFIG=json.load(f)
EmbeddingsManager.init(CONFIG)
Summary.init(CONFIG)
QUERIERS=[
EmbeddingsQuery(CONFIG),
DiscourseQuery(
CONFIG,CONFIG["JME_HUB_URL"],
searchFilter=CONFIG["JME_HUB_SEARCH_FILTER"],
knowledgeCutoff=CONFIG["JME_HUB_KNOWLEDGE_CUTOFF"]
)
]
Translator.init(CONFIG)
def getAffineDocs(question,context,keywords,shortQuestion, wordSalad=None, unitFilter=None,
maxFragmentsToReturn=3, maxFragmentsToSelect=12,merge=False):
affineDocs=[]
for q in QUERIERS:
print("Get affine docs from",q,"using question",question,"with context",context,"and keywords",keywords)
t=time.time()
v=q.getAffineDocs(
question, context, keywords,shortQuestion, wordSalad, unitFilter,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect,
merge=merge
)
print("Completed in",time.time()-t,"seconds.")
if v!=None:
affineDocs.extend(v)
return affineDocs
def rewriteError(error):
if error.startswith("Rate limit reached ") :
return "Rate limit."
def rewrite(question):
# replace app, applet, game, application with simple application
question=re.sub(r"\b(app|applet|game|application)\b", "simple application", question, flags=re.IGNORECASE)
return question
def createChain():
# Backward compatibility
model_name=CONFIG.get("OPENAI_MODEL","text-davinci-003")
llm_name="openai"
########
llmx=CONFIG.get("LLM_MODEL",None) # "openai:text-davinci-003" "cohere:xlarge"
if llmx!=None:
if ":" in llmx:
llm_name,model_name=llmx.split(":")
else:
llm_name,model_name=llmx.split(".")
template = ""
template_path="prompts/"+llm_name+"."+model_name+".txt"
if not os.path.exists(template_path):
template_path="prompts/openai.text-davinci-003.txt"
with open(template_path, "r") as f:
template=f.read()
prompt = PromptTemplate(
input_variables=[ "history", "question", "summaries"],
template=template
)
llm=None
history_length=700
if llm_name=="openai":
max_tokens=512
temperature=0.0
if model_name=="text-davinci-003":
max_tokens=512
elif model_name=="code-davinci-002":
max_tokens=1024
#history_length=1024
llm=OpenAI(
temperature=temperature,
model_name=model_name,
max_tokens=max_tokens,
)
elif llm_name=="cohere":
llm=Cohere(
model=model_name,
max_tokens=700
)
history_length=200
elif llm_name=="ai21":
llm=AI21(
temperature=0.7,
model=model_name,
)
elif llm_name=="nlpcloud":
llm=NLPCloud(
model_name=model_name,
)
else:
raise Exception("Unknown LLM "+llm_name)
print("Use model ",model_name,"from",llm_name)
memory=ConversationSummaryBufferMemory(llm=llm, max_token_limit=history_length,human_prefix="QUESTION",ai_prefix="ANSWER", memory_key="history", input_key="question")
chain = load_qa_with_sources_chain(
llm,
memory=memory,
prompt=prompt,
verbose=True,
)
return chain
def extractQuestionData(question,wordSalad):
shortQuestion=Summary.summarizeMarkdown(question,min_length=100,max_length=1024,withCodeBlocks=False)
context=Summary.summarizeText(wordSalad,min_length=20,max_length=32)
keywords=[]
keywords.extend(Summary.getKeywords(shortQuestion,2))
keywords.extend(Summary.getKeywords(Summary.summarizeText(wordSalad,min_length=10,max_length=20),3))
return [question,shortQuestion,context,keywords,wordSalad]
def queryChain(chain,question):
wordSalad=""
for h in chain.memory.buffer: wordSalad+=h+" "
wordSalad+=" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda :extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda :getAffineDocs(question,context,keywords,shortQuestion,wordSalad))
print("Found ",len(affineDocs), " affine docs")
print("Q: ", shortQuestion)
output=chain({"input_documents": affineDocs, "question": shortQuestion}, return_only_outputs=True)
print("A :",output)
return output
sessions={}
langchain.llm_cache = SmartCache(CONFIG)#SQLiteCache(database_path=CONFIG["CACHE_PATH"]+"/langchain.db")
def clearSessions():
while True:
time.sleep(60*5)
for session in sessions:
if sessions[session]["timeout"] < time.time():
del sessions[session]
threading.Thread(target=clearSessions).start()
def createSessionSecret():
hex_chars = string.hexdigits
timeHash=hashlib.sha256(str(time.time()).encode("utf-8")).hexdigest()[:12]
return ''.join(secrets.choice(hex_chars) for i in range(64))+timeHash
app = Flask(__name__)
@app.route("/langs")
def langs():
return json.dumps(Translator.getLangs())
@app.route("/session",methods = ['POST'])
def session():
body=request.get_json()
lang=body["lang"] if "lang" in body else "en"
if lang=="auto":
lang="en"
if not "sessionSecret" in body or body["sessionSecret"].strip()=="":
sessionSecret=createSessionSecret()
else:
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
sessions[sessionSecret]={
"chain": createChain(),
"timeout": time.time()+60*30
}
else:
sessions[sessionSecret]["timeout"]=time.time()+60*30
welcomeText=""
welcomeText+=Translator.translate("en", lang,"Hi there! I'm an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics.")
welcomeText+="<br><br>"
welcomeText+="<footer><span class=\"material-symbols-outlined\">tips_and_updates</span><span>"+Translator.translate("en", lang,"This chat bot is intended to provide helpful information, but accuracy is not guaranteed.")+"</span></footer>"
return json.dumps( {
"sessionSecret": sessionSecret,
"helloText":Translator.translate("en",lang,"Who are you?"),
"welcomeText":welcomeText
})
@app.route("/query",methods = ['POST'])
def query():
try:
body=request.get_json()
question=rewrite(body["question"])
lang=body["lang"] if "lang" in body else "en"
if lang == "auto":
lang=Translator.detect(question)
if lang!="en":
question=Translator.translate(lang,"en",question)
if len(question)==0:
raise Exception("Question is empty")
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
return json.dumps({"error": "Session expired"})
chain=sessions[sessionSecret]["chain"]
output=queryChain(chain,question)
if lang!="en":
output["output_text"]=Translator.translate("en",lang,output["output_text"])
#print(chain.memory.buffer)
return json.dumps(output)
except Exception as e:
print(e)
print(traceback.format_exc())
errorStr=str(e)
errorStr=rewriteError(errorStr)
return json.dumps({"error": errorStr})
@app.route('/<path:filename>')
def serveFrontend(filename):
return send_from_directory('frontend/', filename)
@app.route('/')
def serveIndex():
return send_from_directory('frontend/', "index.html")
@app.route('/docs', methods=['POST'])
def docs():
body=request.get_json()
question=body["question"]
maxFragmentsToReturn=int(body.get("maxFragmentsToReturn",3))
maxFragmentsToSelect=int(body.get("maxFragmentsToSelect",6))
wordSalad=body.get("context","")+" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda : extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda : getAffineDocs(
question,context,keywords,shortQuestion,wordSalad,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect
))
plainDocs=[
{
"content":doc.page_content,
"metadata":doc.metadata
} for doc in affineDocs
]
return json.dumps(plainDocs)
serve(app, host="0.0.0.0", port=8080, connection_limit=1000)
| [
"langchain.chains.qa_with_sources.load_qa_with_sources_chain",
"langchain.llms.Cohere",
"langchain.llms.OpenAI",
"langchain.prompts.PromptTemplate",
"langchain.llms.AI21",
"langchain.llms.NLPCloud",
"langchain.chains.conversation.memory.ConversationSummaryBufferMemory"
] | [((5785, 5803), 'SmartCache.SmartCache', 'SmartCache', (['CONFIG'], {}), '(CONFIG)\n', (5795, 5803), False, 'from SmartCache import SmartCache\n'), ((6330, 6345), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (6335, 6345), False, 'from flask import Flask, send_from_directory\n'), ((9830, 9890), 'waitress.serve', 'serve', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8080)', 'connection_limit': '(1000)'}), "(app, host='0.0.0.0', port=8080, connection_limit=1000)\n", (9835, 9890), False, 'from waitress import serve\n'), ((1263, 1275), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1272, 1275), False, 'import json\n'), ((1280, 1310), 'embeddings.EmbeddingsManager.init', 'EmbeddingsManager.init', (['CONFIG'], {}), '(CONFIG)\n', (1302, 1310), False, 'from embeddings import EmbeddingsManager\n'), ((1315, 1335), 'Summary.Summary.init', 'Summary.init', (['CONFIG'], {}), '(CONFIG)\n', (1327, 1335), False, 'from Summary import Summary\n'), ((1591, 1614), 'translator.Translator.init', 'Translator.init', (['CONFIG'], {}), '(CONFIG)\n', (1606, 1614), False, 'from translator import Translator\n'), ((2557, 2659), 're.sub', 're.sub', (['"""\\\\b(app|applet|game|application)\\\\b"""', '"""simple application"""', 'question'], {'flags': 're.IGNORECASE'}), "('\\\\b(app|applet|game|application)\\\\b', 'simple application',\n question, flags=re.IGNORECASE)\n", (2563, 2659), False, 'import re\n'), ((3341, 3432), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'question', 'summaries']", 'template': 'template'}), "(input_variables=['history', 'question', 'summaries'],\n template=template)\n", (3355, 3432), False, 'from langchain.prompts import PromptTemplate\n'), ((4371, 4540), 'langchain.chains.conversation.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'max_token_limit': 'history_length', 'human_prefix': '"""QUESTION"""', 'ai_prefix': '"""ANSWER"""', 'memory_key': '"""history"""', 'input_key': '"""question"""'}), "(llm=llm, max_token_limit=history_length,\n human_prefix='QUESTION', ai_prefix='ANSWER', memory_key='history',\n input_key='question')\n", (4402, 4540), False, 'from langchain.chains.conversation.memory import ConversationSummaryBufferMemory, ConversationBufferMemory, ConversationBufferWindowMemory\n'), ((4543, 4618), 'langchain.chains.qa_with_sources.load_qa_with_sources_chain', 'load_qa_with_sources_chain', (['llm'], {'memory': 'memory', 'prompt': 'prompt', 'verbose': '(True)'}), '(llm, memory=memory, prompt=prompt, verbose=True)\n', (4569, 4618), False, 'from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n'), ((4748, 4842), 'Summary.Summary.summarizeMarkdown', 'Summary.summarizeMarkdown', (['question'], {'min_length': '(100)', 'max_length': '(1024)', 'withCodeBlocks': '(False)'}), '(question, min_length=100, max_length=1024,\n withCodeBlocks=False)\n', (4773, 4842), False, 'from Summary import Summary\n'), ((4849, 4911), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(20)', 'max_length': '(32)'}), '(wordSalad, min_length=20, max_length=32)\n', (4870, 4911), False, 'from Summary import Summary\n'), ((6497, 6515), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6513, 6515), False, 'from flask import request\n'), ((7046, 7280), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. 
I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."""'], {}), '(\'en\', lang,\n "Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."\n )\n', (7066, 7280), False, 'from translator import Translator\n'), ((8890, 8932), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', 'filename'], {}), "('frontend/', filename)\n", (8909, 8932), False, 'from flask import Flask, send_from_directory\n'), ((8979, 9025), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', '"""index.html"""'], {}), "('frontend/', 'index.html')\n", (8998, 9025), False, 'from flask import Flask, send_from_directory\n'), ((9086, 9104), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (9102, 9104), False, 'from flask import request\n'), ((9806, 9827), 'json.dumps', 'json.dumps', (['plainDocs'], {}), '(plainDocs)\n', (9816, 9827), False, 'import json\n'), ((1359, 1382), 'query.embeddingsquery.EmbeddingsQuery', 'EmbeddingsQuery', (['CONFIG'], {}), '(CONFIG)\n', (1374, 1382), False, 'from query.embeddingsquery import EmbeddingsQuery\n'), ((1392, 1545), 'query.discoursequery.DiscourseQuery', 'DiscourseQuery', (['CONFIG', "CONFIG['JME_HUB_URL']"], {'searchFilter': "CONFIG['JME_HUB_SEARCH_FILTER']", 'knowledgeCutoff': "CONFIG['JME_HUB_KNOWLEDGE_CUTOFF']"}), "(CONFIG, CONFIG['JME_HUB_URL'], searchFilter=CONFIG[\n 'JME_HUB_SEARCH_FILTER'], knowledgeCutoff=CONFIG[\n 'JME_HUB_KNOWLEDGE_CUTOFF'])\n", (1406, 1545), False, 'from query.discoursequery import DiscourseQuery\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((3165, 3194), 'os.path.exists', 'os.path.exists', (['template_path'], {}), '(template_path)\n', (3179, 3194), False, 'import os\n'), ((3764, 3841), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, model_name=model_name, max_tokens=max_tokens)\n', (3770, 3841), False, 'from langchain.llms import OpenAI\n'), ((4946, 4983), 'Summary.Summary.getKeywords', 'Summary.getKeywords', (['shortQuestion', '(2)'], {}), '(shortQuestion, 2)\n', (4965, 4983), False, 'from Summary import Summary\n'), ((5914, 5932), 'time.sleep', 'time.sleep', (['(60 * 5)'], {}), '(60 * 5)\n', (5924, 5932), False, 'import time\n'), ((6061, 6099), 'threading.Thread', 'threading.Thread', ([], {'target': 'clearSessions'}), '(target=clearSessions)\n', (6077, 6099), False, 'import threading\n'), ((6407, 6428), 'translator.Translator.getLangs', 'Translator.getLangs', ([], {}), '()\n', (6426, 6428), False, 'from translator import Translator\n'), ((7801, 7819), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (7817, 7819), False, 'from flask import request\n'), ((8605, 8623), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (8615, 8623), False, 'import json\n'), ((3930, 3970), 'langchain.llms.Cohere', 'Cohere', ([], {'model': 'model_name', 'max_tokens': '(700)'}), '(model=model_name, max_tokens=700)\n', (3936, 3970), False, 'from langchain.llms import Cohere\n'), ((5024, 5086), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(10)', 'max_length': '(20)'}), '(wordSalad, min_length=10, max_length=20)\n', (5045, 5086), False, 'from Summary import Summary\n'), ((6992, 7003), 'time.time', 
'time.time', ([], {}), '()\n', (7001, 7003), False, 'import time\n'), ((7398, 7532), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""This chat bot is intended to provide helpful information, but accuracy is not guaranteed."""'], {}), "('en', lang,\n 'This chat bot is intended to provide helpful information, but accuracy is not guaranteed.'\n )\n", (7418, 7532), False, 'from translator import Translator\n'), ((7636, 7684), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Who are you?"""'], {}), "('en', lang, 'Who are you?')\n", (7656, 7684), False, 'from translator import Translator\n'), ((7971, 7998), 'translator.Translator.detect', 'Translator.detect', (['question'], {}), '(question)\n', (7988, 7998), False, 'from translator import Translator\n'), ((8044, 8086), 'translator.Translator.translate', 'Translator.translate', (['lang', '"""en"""', 'question'], {}), "(lang, 'en', question)\n", (8064, 8086), False, 'from translator import Translator\n'), ((8290, 8330), 'json.dumps', 'json.dumps', (["{'error': 'Session expired'}"], {}), "({'error': 'Session expired'})\n", (8300, 8330), False, 'import json\n'), ((8499, 8554), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', "output['output_text']"], {}), "('en', lang, output['output_text'])\n", (8519, 8554), False, 'from translator import Translator\n'), ((8785, 8816), 'json.dumps', 'json.dumps', (["{'error': errorStr}"], {}), "({'error': errorStr})\n", (8795, 8816), False, 'import json\n'), ((2241, 2252), 'time.time', 'time.time', ([], {}), '()\n', (2250, 2252), False, 'import time\n'), ((4072, 4111), 'langchain.llms.AI21', 'AI21', ([], {'temperature': '(0.7)', 'model': 'model_name'}), '(temperature=0.7, model=model_name)\n', (4076, 4111), False, 'from langchain.llms import AI21\n'), ((6010, 6021), 'time.time', 'time.time', ([], {}), '()\n', (6019, 6021), False, 'import time\n'), ((6267, 6292), 'secrets.choice', 'secrets.choice', (['hex_chars'], {}), '(hex_chars)\n', (6281, 6292), False, 'import secrets\n'), ((6911, 6922), 'time.time', 'time.time', ([], {}), '()\n', (6920, 6922), False, 'import time\n'), ((8682, 8704), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8702, 8704), False, 'import traceback\n'), ((4193, 4224), 'langchain.llms.NLPCloud', 'NLPCloud', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (4201, 4224), False, 'from langchain.llms import NLPCloud\n'), ((6201, 6212), 'time.time', 'time.time', ([], {}), '()\n', (6210, 6212), False, 'import time\n')] |
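A client-side sketch for exercising the Flask endpoints above with `requests`; the host and port follow the `serve()` call at the end of the script, and the question text is only an example:

```python
# Open a chat session, then ask a question against the /query endpoint.
import requests

base = "http://localhost:8080"
session = requests.post(f"{base}/session", json={"lang": "en"}).json()
answer = requests.post(f"{base}/query", json={
    "sessionSecret": session["sessionSecret"],
    "question": "How do I load a 3D model?",
    "lang": "en",
}).json()
print(answer.get("output_text") or answer.get("error"))
```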
import os
import streamlit as st
from PyPDF2 import PdfReader
import langchain
langchain.verbose = False
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import requests
from bs4 import BeautifulSoup
def webscrap(name):
# Replace this URL with the one you want to scrape
url = f'https://www.{name}.com'
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
page_text = soup.get_text()
return page_text
else:
return None
def main():
print(os.getenv('OPENAI_API_KEY'))
st.set_page_config(page_title="Webscrap chatbot")
st.header("Webscrap chatbot")
name = st.text_input("enter website name")
web_data= webscrap(name)
if web_data is not None:
text = web_data
# for page in pdf_reader.pages:
# text += page.extract_text()
max_length = 1800
original_string = text
temp_string = ""
strings_list = []
for character in original_string:
if len(temp_string) < max_length:
temp_string += character
else:
strings_list.append(temp_string)
# start the next chunk with the current character so it is not dropped
temp_string = character
if temp_string:
strings_list.append(temp_string)
#split into chunks
# create embeddings
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(strings_list, embedding=embeddings)
user_question = st.text_input("Ask a question about your PDF")
if user_question:
docs = knowledge_base.similarity_search(user_question)
llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.9)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents = docs, question = user_question)
print(cb)
st.write(response)
if __name__ == '__main__':
main()
| [
"langchain.vectorstores.FAISS.from_texts",
"langchain.callbacks.get_openai_callback",
"langchain.llms.OpenAI",
"langchain.chains.question_answering.load_qa_chain",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((583, 600), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (595, 600), False, 'import requests\n'), ((853, 902), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Webscrap chatbot"""'}), "(page_title='Webscrap chatbot')\n", (871, 902), True, 'import streamlit as st\n'), ((907, 936), 'streamlit.header', 'st.header', (['"""Webscrap chatbot"""'], {}), "('Webscrap chatbot')\n", (916, 936), True, 'import streamlit as st\n'), ((949, 984), 'streamlit.text_input', 'st.text_input', (['"""enter website name"""'], {}), "('enter website name')\n", (962, 984), True, 'import streamlit as st\n'), ((653, 696), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (666, 696), False, 'from bs4 import BeautifulSoup\n'), ((815, 842), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (824, 842), False, 'import os\n'), ((1660, 1678), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1676, 1678), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1704, 1756), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['strings_list'], {'embedding': 'embeddings'}), '(strings_list, embedding=embeddings)\n', (1720, 1756), False, 'from langchain.vectorstores import FAISS\n'), ((1782, 1828), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF"""'], {}), "('Ask a question about your PDF')\n", (1795, 1828), True, 'import streamlit as st\n'), ((1954, 2005), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (1960, 2005), False, 'from langchain.llms import OpenAI\n'), ((2027, 2065), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (2040, 2065), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2238, 2256), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (2246, 2256), True, 'import streamlit as st\n'), ((2083, 2104), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2102, 2104), False, 'from langchain.callbacks import get_openai_callback\n')] |
# Wrapper for Hugging Face APIs for llmlib
from llmlib.base_model_wrapper import BaseModelWrapper
from llama_index import ListIndex, SimpleDirectoryReader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, ServiceContext
from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader
from transformers import pipeline
import torch
from langchain.llms.base import LLM
from llama_index import LLMPredictor
from pprint import pprint
class CustomLLM(LLM):
model_name = "facebook/opt-iml-1.3b"
# I am not using a GPU, but you can add device="cuda:0"
# to the pipeline call if you have a local GPU or
# are running this on Google Colab:
pipeline = pipeline("text-generation",
model=model_name,
model_kwargs={"torch_dtype":torch.bfloat16})
def _call(self, prompt, stop = None):
prompt_length = len(prompt)
response = self.pipeline(prompt, max_new_tokens=200)
pprint(response)
first_response = response[0]["generated_text"]
# only return newly generated tokens
returned_text = first_response[prompt_length:]
return returned_text
@property
def _identifying_params(self):
return {"name_of_model": self.model_name}
@property
def _llm_type(self):
return "custom"
class HuggingFaceAiWrapper(BaseModelWrapper):
def __init__(self, key=None, embeddings_dir="./db_embeddings"):
super().__init__(embeddings_dir=embeddings_dir)
self.llm_predictor = LLMPredictor(llm=CustomLLM())
self.embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
self.service_context = \
ServiceContext.from_defaults(llm_predictor=self.llm_predictor,
embed_model=self.embed_model)
max_input_size = 512
num_output = 64
max_chunk_overlap = 0 # 10
self.prompt_helper = PromptHelper(max_input_size, num_output,
max_chunk_overlap)
self.pipeline = None
# complete text:
def get_completion(self, prompt, max_tokens=64):
if self.pipeline is None:
self.pipeline = pipeline("text-generation",
model="facebook/opt-iml-1.3b",
model_kwargs={"torch_dtype":torch.bfloat16})
c = self.pipeline(prompt, max_new_tokens=max_tokens)
pprint(c)
try:
return c[0]["generated_text"]
except Exception as e:
print(e)
return ""
def create_local_embeddings_files_in_dir(self, path):
" path is a directory "
self.documents = SimpleDirectoryReader(path).load_data()
self.index = ListIndex.from_documents(documents=self.documents,
llm_predictor=self.llm_predictor,
prompt_helper=self.prompt_helper)
self.index = self.index.as_query_engine(llm_predictor=self.llm_predictor)
# query local embeddings:
def query_local_embeddings(self, query, n=10):
answer = self.index.query(query)
return answer | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((735, 830), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_name', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model=model_name, model_kwargs={'torch_dtype':\n torch.bfloat16})\n", (743, 830), False, 'from transformers import pipeline\n'), ((1022, 1038), 'pprint.pprint', 'pprint', (['response'], {}), '(response)\n', (1028, 1038), False, 'from pprint import pprint\n'), ((1731, 1828), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'self.llm_predictor', 'embed_model': 'self.embed_model'}), '(llm_predictor=self.llm_predictor, embed_model=\n self.embed_model)\n', (1759, 1828), False, 'from llama_index import LangchainEmbedding, ServiceContext\n'), ((1980, 2039), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1992, 2039), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n'), ((2498, 2507), 'pprint.pprint', 'pprint', (['c'], {}), '(c)\n', (2504, 2507), False, 'from pprint import pprint\n'), ((2823, 2946), 'llama_index.ListIndex.from_documents', 'ListIndex.from_documents', ([], {'documents': 'self.documents', 'llm_predictor': 'self.llm_predictor', 'prompt_helper': 'self.prompt_helper'}), '(documents=self.documents, llm_predictor=self.\n llm_predictor, prompt_helper=self.prompt_helper)\n', (2847, 2946), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n'), ((1663, 1686), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (1684, 1686), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((2248, 2357), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': '"""facebook/opt-iml-1.3b"""', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model='facebook/opt-iml-1.3b', model_kwargs={\n 'torch_dtype': torch.bfloat16})\n", (2256, 2357), False, 'from transformers import pipeline\n'), ((2762, 2789), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (2783, 2789), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n')] |
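A hedged driver for the wrapper above; the document folder is hypothetical, and the first call will download the `facebook/opt-iml-1.3b` weights:

```python
# Index a folder of text files with the local embedding model, query the index,
# and exercise the plain text-generation helper.
wrapper = HuggingFaceAiWrapper()
wrapper.create_local_embeddings_files_in_dir("./docs")  # hypothetical folder of documents
print(wrapper.query_local_embeddings("What does this project do?"))
print(wrapper.get_completion("The three primary colors are", max_tokens=16))
```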
import logging
import ConsoleInterface
import langchain.schema
from langchain.agents import initialize_agent, AgentType #create_pandas_dataframe_agent
logger = logging.getLogger('ConsoleInterface')
'''
def PandasDataframeAgent(llm, Dataframe):
"""
Create a PandasDataframeAgent object.
Parameters:
llm (str): The llm parameter.
Dataframe (pandas.DataFrame): The DataFrame parameter.
Returns:
PandasDataframeAgent: The created PandasDataframeAgent object.
"""
PandasDataframeAgent = create_pandas_dataframe_agent(llm, df=Dataframe, verbose=True)
return PandasDataframeAgent
'''
def RunConversationalAgent(llm, Tools, Memory):
"""
Run the conversational agent.
Args:
llm: The language model used by the agent.
Tools: The tools available to the agent.
Memory: The memory used by the agent.
Returns:
None
"""
Agent = initialize_agent(agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, llm=llm, tools=Tools, memory=Memory, verbose=True)
logger.info("Agent initialized successfully!\n")
while True:
query = input("Enter your query: ")
if query.lower() == "exit" or query.lower() == "quit":
break
try:
Agent.run(str(query))
except langchain.schema.OutputParserException as e:
# Extract the message from the exception
message = str(e)
# The message is in the form "Could not parse LLM output: `...`"
# So, we can split it by the backticks and take the second element
answer = message.split('`')[1]
logger.warning("\nError occured in retrieving answer from language model. Please check your query and try again. Answer stored in error message will be printed:\n")
logger.warning("\nAnswer: ", answer) | [
"langchain.agents.initialize_agent"
] | [((165, 202), 'logging.getLogger', 'logging.getLogger', (['"""ConsoleInterface"""'], {}), "('ConsoleInterface')\n", (182, 202), False, 'import logging\n'), ((946, 1067), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'agent': 'AgentType.CONVERSATIONAL_REACT_DESCRIPTION', 'llm': 'llm', 'tools': 'Tools', 'memory': 'Memory', 'verbose': '(True)'}), '(agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, llm=llm,\n tools=Tools, memory=Memory, verbose=True)\n', (962, 1067), False, 'from langchain.agents import initialize_agent, AgentType\n')] |
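A hypothetical wiring example for the console agent above; the tool list, memory settings, and model choice are assumptions, not part of the original module:

```python
# Build an llm, a simple tool set, and a conversational memory, then start the loop.
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.agents import load_tools

llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
RunConversationalAgent(llm, tools, memory)
```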
import csv
from ctypes import Array
from typing import Any, Coroutine, List, Tuple
import io
import time
import re
import os
from fastapi import UploadFile
import asyncio
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent
from langchain.tools import HumanInputRun, PythonAstREPLTool
from langchain.callbacks.tracers import ConsoleCallbackHandler
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory
from langchain import PromptTemplate
import pandas as pd
from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
from util.tools import SessionHumanInputRun
import util.config as config
from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue
import redis
r = redis.from_url(os.environ.get("REDIS_URL"))
#r = redis.from_url('redis://:password@localhost:6379')
class Processor:
def __init__(self, session):
self.session = session
async def extract_csv_description(self, df: UploadFile|str, llm, memory) -> Coroutine[Any, Any, Tuple[pd.DataFrame, str]] :
df = pd.read_csv(df)
agent = create_pandas_dataframe_agent(llm=llm,df=df, agent_executor_kwargs={'handle_parsing_errors':True, 'memory':memory},
early_stopping_method="generate", verbose=True,
temperature=0,agent_type=AgentType.OPENAI_FUNCTIONS,)
descriptions = agent.run("""Describe what is the column name of each of the column table in detail in the following format:
<name of column 1>: <description of column 1>\n
<name of column 2>: <description of column 2>""", callbacks=[ConsoleCallbackHandler()])
return df, descriptions
async def _human_prompt(prompt, session):
r.publish(f'human_prompt_{session}', prompt)
async def _human_input(session):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(f'human_input_{session}')
message = None
while True:
message = p.get_message()
if message and message['type']=='message':
break
print("waiting for human input")
await asyncio.sleep(1)
return message['data'].decode('utf-8')
async def process_files(self, table_file, template_file, file_guid):
table_string = table_file.decode('utf-8')
template_string = template_file.decode('utf-8')
llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model="gpt-3.5-turbo-0613", )
memory = ConversationSummaryBufferMemory(llm=llm,memory_key="chat_history", return_messages=True, max_token_limit=1500)
table_df, table_descriptions = await self.extract_csv_description(io.StringIO(table_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'table_descriptions')
r.publish(f'{self.session}_response', table_descriptions)
template_df, template_descriptions = await self.extract_csv_description(io.StringIO(template_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'template_descriptions')
r.publish(f'{self.session}_response', template_descriptions)
dfs =[table_df, template_df]
human_tool = SessionHumanInputRun(session=self.session)
human_tool.description = '''
Use this tool to take human input.
If the mapping is ambiguous, ask 'human' a question with options in the following format.
Make the human confirm the mapping by selecting the appropriate number.
- Question: The template column <template column name> should be mapped to which one of the table columns
(1: <table column name 1>, 2: <table column name 2> (Recommended), 3:<table column name 3>, ...)? Select the appropriate number or specify the column name.
'''
human_tool.prompt_func= Processor._human_prompt
human_tool.input_func = Processor._human_input
mappings = await self.get_mappings(llm, table_descriptions, template_descriptions, human_tool)
codes = await self.get_template_formatting_code(llm, table_df, template_df, human_tool, mappings, memory)
new_table_df = table_df.loc[:,[code.table_column for code in codes]]
for code in codes:
new_table_df[code.table_column].apply(lambda x: self.format_value(x,code=code.code))
r.set(f"{self.session}_{file_guid}", new_table_df.to_msgpack(compress='zlib'))
r.publish(f'{self.session}_response', f'file_guid:{file_guid}')
def format_value(self, source_value, code):
value = TransformValue(source=source_value,destination=source_value)
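        # The LLM-generated snippet reads value.source and assigns the transformed result to value.destination.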
try:
exec(code, {'value':value})
except Exception as e:
r.publish(f'{self.session}_response',f'ERROR: \nCode: \n {code} \n Failed with error: \n{e}')
print(e)
return value.destination
async def get_mappings(self,llm, table_descriptions, template_descriptions, human_tool):
parser = PydanticOutputParser(pydantic_object=TemplateMappingList)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
agent = initialize_agent(
[human_tool],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
)
descriptions = await agent.arun("""Map all the columns of the Template descriptions to columns of the table Descriptions:
- Table Descriptions:
""" + table_descriptions + """
- Template Descriptions:
""" + template_descriptions + """
Use the table and template descriptions above to determine the mapping based on similarity, formats and distribution.
If the table column names are ambiguous take human input.
""",callbacks=[ConsoleCallbackHandler()],)
print(descriptions)
mappings = new_parser.parse(descriptions)
return mappings
async def get_template_formatting_code(self, llm, table_df, template_df, human_tool, mappings: TemplateMappingList, memory):
dfs = []
dfs.append(table_df)
dfs.append(template_df)
df_locals = {}
df_locals[f"table_df"] = table_df
df_locals[f"template_df"] = template_df
parser = PydanticOutputParser(pydantic_object=TemplateMappingCode)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
codes=[]
#The code should be in the format of a Python function taking as input a string and returning a string.
for mapping in mappings.template_mappings:
human_tool.description = f'''
Use this tool to get human approval. Always show the samples and code. The human can edit the code and approve it.
'''
table_df_samples = table_df[mapping.table_column].sample(5).to_list()
template_df_samples = template_df[mapping.template_column].sample(5).to_list()
agent = initialize_agent(
[PythonAstREPLTool(locals=df_locals)],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
memory = memory,
memory_key = 'chat_history'
)
#The AI can determine the format of the column values only after sampling.
#As shown in the output below, generate the code as a Python function taking as input a string and returning a string and also include a call to the generated function.
code = agent.run(f'''Provide the code to bring the format of values in table_df column '{mapping.table_column}'
to the format of values in template_df column '{mapping.template_column}' based off the values, data types and formats.
Additional samples to be used to generate the code:
'{mapping.table_column}' sample values: [{table_df_samples}]
'{mapping.template_column}' samples values: [{template_df_samples}]
The input to the code will be a value object with the following attributes:
- source: The value of the table_df column '{mapping.table_column}'.
- destination: The value of the template_df column '{mapping.template_column}'.
Show the sample values using which the code is generated.
For example, for date columns, they may be in different formats, and it is necessary to change the format from dd.mm.yyyy to mm.dd.yyyy.
Final Answer:
```
```python
def format_value(source_value):
<code to transform source_value into destination_value>
return destination_value
value.destination = format_value(value.source)
```
```
Final Answer should contain the samples and code.
''', callbacks=[ConsoleCallbackHandler(), ])
print(code)
human_code = await human_tool.arun(code + '\nSpecify the code with ```python``` tags.')
regex = r"```python((.|\n|\t)*?)```"
code = human_code if re.match(regex, human_code) else code
matches = re.findall(regex, code)
code = ''
for match in matches:
code = code + '\n'+ '\n'.join(match)
codes.append(TemplateMappingCode(template_column=mapping.template_column,
table_column=mapping.table_column,
code=code))
return codes | [
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.tools.PythonAstREPLTool",
"langchain.memory.ConversationSummaryBufferMemory",
"langchain.callbacks.tracers.ConsoleCallbackHandler",
"langchain.agents.create_pandas_dataframe_agent",
"langchain.output_parsers.OutputFixingParser.from_llm",
"langchain.output_parsers.PydanticOutputParser"
] | [((963, 990), 'os.environ.get', 'os.environ.get', (['"""REDIS_URL"""'], {}), "('REDIS_URL')\n", (977, 990), False, 'import os\n'), ((1270, 1285), 'pandas.read_csv', 'pd.read_csv', (['df'], {}), '(df)\n', (1281, 1285), True, 'import pandas as pd\n'), ((1302, 1537), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', ([], {'llm': 'llm', 'df': 'df', 'agent_executor_kwargs': "{'handle_parsing_errors': True, 'memory': memory}", 'early_stopping_method': '"""generate"""', 'verbose': '(True)', 'temperature': '(0)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS'}), "(llm=llm, df=df, agent_executor_kwargs={\n 'handle_parsing_errors': True, 'memory': memory}, early_stopping_method\n ='generate', verbose=True, temperature=0, agent_type=AgentType.\n OPENAI_FUNCTIONS)\n", (1331, 1537), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((2683, 2779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'config.OPENAI_API_KEY', 'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""'}), "(openai_api_key=config.OPENAI_API_KEY, temperature=0, model=\n 'gpt-3.5-turbo-0613')\n", (2693, 2779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2794, 2909), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(1500)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True, max_token_limit=1500)\n", (2825, 2909), False, 'from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory\n'), ((3498, 3540), 'util.tools.SessionHumanInputRun', 'SessionHumanInputRun', ([], {'session': 'self.session'}), '(session=self.session)\n', (3518, 3540), False, 'from util.tools import SessionHumanInputRun\n'), ((4868, 4929), 'util.model.TransformValue', 'TransformValue', ([], {'source': 'source_value', 'destination': 'source_value'}), '(source=source_value, destination=source_value)\n', (4882, 4929), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((5288, 5345), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingList'}), '(pydantic_object=TemplateMappingList)\n', (5308, 5345), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5367, 5418), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (5394, 5418), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5434, 5614), 'langchain.agents.initialize_agent', 'initialize_agent', (['[human_tool]', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)', 'early_stopping_method': '"""force"""', 'temperature': '(0.3)', 'output_parser': 'new_parser'}), "([human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS,\n handle_parsing_errors=True, early_stopping_method='force', temperature=\n 0.3, output_parser=new_parser)\n", (5450, 5614), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((6899, 6956), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingCode'}), '(pydantic_object=TemplateMappingCode)\n', (6919, 6956), 
False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((6978, 7029), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (7005, 7029), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((10532, 10555), 're.findall', 're.findall', (['regex', 'code'], {}), '(regex, code)\n', (10542, 10555), False, 'import re\n'), ((2425, 2441), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2438, 2441), False, 'import asyncio\n'), ((2988, 3013), 'io.StringIO', 'io.StringIO', (['table_string'], {}), '(table_string)\n', (2999, 3013), False, 'import io\n'), ((3250, 3278), 'io.StringIO', 'io.StringIO', (['template_string'], {}), '(template_string)\n', (3261, 3278), False, 'import io\n'), ((10459, 10486), 're.match', 're.match', (['regex', 'human_code'], {}), '(regex, human_code)\n', (10467, 10486), False, 'import re\n'), ((10690, 10801), 'util.model.TemplateMappingCode', 'TemplateMappingCode', ([], {'template_column': 'mapping.template_column', 'table_column': 'mapping.table_column', 'code': 'code'}), '(template_column=mapping.template_column, table_column=\n mapping.table_column, code=code)\n', (10709, 10801), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((1905, 1929), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (1927, 1929), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((7662, 7697), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'locals': 'df_locals'}), '(locals=df_locals)\n', (7679, 7697), False, 'from langchain.tools import HumanInputRun, PythonAstREPLTool\n'), ((6408, 6432), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (6430, 6432), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((10224, 10248), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (10246, 10248), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n')] |
from typing import Dict, List, Optional
from langchain.agents.load_tools import (
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langflow.custom import customs
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.tools.constants import (
ALL_TOOLS_NAMES,
CUSTOM_TOOLS,
FILE_TOOLS,
OTHER_TOOLS,
)
from langflow.interface.tools.util import get_tool_params
from langflow.settings import settings
from langflow.template.field.base import TemplateField
from langflow.template.template.base import Template
from langflow.utils import util
from langflow.utils.util import build_template_from_class
TOOL_INPUTS = {
"str": TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
placeholder="",
value="",
),
"llm": TemplateField(
field_type="BaseLanguageModel", required=True, is_list=False, show=True
),
"func": TemplateField(
field_type="function",
required=True,
is_list=False,
show=True,
multiline=True,
),
"code": TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
value="",
multiline=True,
),
"path": TemplateField(
field_type="file",
required=True,
is_list=False,
show=True,
value="",
suffixes=[".json", ".yaml", ".yml"],
fileTypes=["json", "yaml", "yml"],
),
}
class ToolCreator(LangChainTypeCreator):
type_name: str = "tools"
tools_dict: Optional[Dict] = None
@property
def type_to_loader_dict(self) -> Dict:
if self.tools_dict is None:
all_tools = {}
for tool, tool_fcn in ALL_TOOLS_NAMES.items():
tool_params = get_tool_params(tool_fcn)
tool_name = tool_params.get("name") or tool
if tool_name in settings.tools or settings.dev:
if tool_name == "JsonSpec":
tool_params["path"] = tool_params.pop("dict_") # type: ignore
all_tools[tool_name] = {
"type": tool,
"params": tool_params,
"fcn": tool_fcn,
}
self.tools_dict = all_tools
return self.tools_dict
def get_signature(self, name: str) -> Optional[Dict]:
"""Get the signature of a tool."""
base_classes = ["Tool", "BaseTool"]
fields = []
params = []
tool_params = {}
# Raise error if name is not in tools
if name not in self.type_to_loader_dict.keys():
raise ValueError("Tool not found")
tool_type: str = self.type_to_loader_dict[name]["type"] # type: ignore
# if tool_type in _BASE_TOOLS.keys():
# params = []
if tool_type in _LLM_TOOLS.keys():
params = ["llm"]
elif tool_type in _EXTRA_LLM_TOOLS.keys():
extra_keys = _EXTRA_LLM_TOOLS[tool_type][1]
params = ["llm"] + extra_keys
elif tool_type in _EXTRA_OPTIONAL_TOOLS.keys():
extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type][1]
params = extra_keys
# elif tool_type == "Tool":
# params = ["name", "description", "func"]
elif tool_type in CUSTOM_TOOLS:
# Get custom tool params
params = self.type_to_loader_dict[name]["params"] # type: ignore
base_classes = ["function"]
if node := customs.get_custom_nodes("tools").get(tool_type):
return node
elif tool_type in FILE_TOOLS:
params = self.type_to_loader_dict[name]["params"] # type: ignore
base_classes += [name]
elif tool_type in OTHER_TOOLS:
tool_dict = build_template_from_class(tool_type, OTHER_TOOLS)
fields = tool_dict["template"]
# Pop unnecessary fields and add name
fields.pop("_type") # type: ignore
fields.pop("return_direct") # type: ignore
fields.pop("verbose") # type: ignore
tool_params = {
"name": fields.pop("name")["value"], # type: ignore
"description": fields.pop("description")["value"], # type: ignore
}
fields = [
TemplateField(name=name, field_type=field["type"], **field)
for name, field in fields.items() # type: ignore
]
base_classes += tool_dict["base_classes"]
# Copy the field and add the name
for param in params:
field = TOOL_INPUTS.get(param, TOOL_INPUTS["str"]).copy()
field.name = param
field.advanced = False
if param == "aiosession":
field.show = False
field.required = False
fields.append(field)
template = Template(fields=fields, type_name=tool_type)
tool_params = {**tool_params, **self.type_to_loader_dict[name]["params"]}
return {
"template": util.format_dict(template.to_dict()),
**tool_params,
"base_classes": base_classes,
}
def to_list(self) -> List[str]:
"""List all load tools"""
return list(self.type_to_loader_dict.keys())
tool_creator = ToolCreator()
| [
"langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys",
"langchain.agents.load_tools._LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys"
] | [((690, 792), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'placeholder': '""""""', 'value': '""""""'}), "(field_type='str', required=True, is_list=False, show=True,\n placeholder='', value='')\n", (703, 792), False, 'from langflow.template.field.base import TemplateField\n'), ((856, 946), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""BaseLanguageModel"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)'}), "(field_type='BaseLanguageModel', required=True, is_list=False,\n show=True)\n", (869, 946), False, 'from langflow.template.field.base import TemplateField\n'), ((970, 1068), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""function"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'multiline': '(True)'}), "(field_type='function', required=True, is_list=False, show=\n True, multiline=True)\n", (983, 1068), False, 'from langflow.template.field.base import TemplateField\n'), ((1124, 1226), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'multiline': '(True)'}), "(field_type='str', required=True, is_list=False, show=True,\n value='', multiline=True)\n", (1137, 1226), False, 'from langflow.template.field.base import TemplateField\n'), ((1291, 1454), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""file"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'suffixes': "['.json', '.yaml', '.yml']", 'fileTypes': "['json', 'yaml', 'yml']"}), "(field_type='file', required=True, is_list=False, show=True,\n value='', suffixes=['.json', '.yaml', '.yml'], fileTypes=['json',\n 'yaml', 'yml'])\n", (1304, 1454), False, 'from langflow.template.field.base import TemplateField\n'), ((4975, 5019), 'langflow.template.template.base.Template', 'Template', ([], {'fields': 'fields', 'type_name': 'tool_type'}), '(fields=fields, type_name=tool_type)\n', (4983, 5019), False, 'from langflow.template.template.base import Template\n'), ((1779, 1802), 'langflow.interface.tools.constants.ALL_TOOLS_NAMES.items', 'ALL_TOOLS_NAMES.items', ([], {}), '()\n', (1800, 1802), False, 'from langflow.interface.tools.constants import ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS\n'), ((2927, 2944), 'langchain.agents.load_tools._LLM_TOOLS.keys', '_LLM_TOOLS.keys', ([], {}), '()\n', (2942, 2944), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((1834, 1859), 'langflow.interface.tools.util.get_tool_params', 'get_tool_params', (['tool_fcn'], {}), '(tool_fcn)\n', (1849, 1859), False, 'from langflow.interface.tools.util import get_tool_params\n'), ((3001, 3024), 'langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys', '_EXTRA_LLM_TOOLS.keys', ([], {}), '()\n', (3022, 3024), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3150, 3178), 'langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys', '_EXTRA_OPTIONAL_TOOLS.keys', ([], {}), '()\n', (3176, 3178), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3874, 3923), 'langflow.utils.util.build_template_from_class', 'build_template_from_class', (['tool_type', 'OTHER_TOOLS'], {}), '(tool_type, 
OTHER_TOOLS)\n', (3899, 3923), False, 'from langflow.utils.util import build_template_from_class\n'), ((3582, 3615), 'langflow.custom.customs.get_custom_nodes', 'customs.get_custom_nodes', (['"""tools"""'], {}), "('tools')\n", (3606, 3615), False, 'from langflow.custom import customs\n'), ((4407, 4466), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'name': 'name', 'field_type': "field['type']"}), "(name=name, field_type=field['type'], **field)\n", (4420, 4466), False, 'from langflow.template.field.base import TemplateField\n')] |
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from streamlit_option_menu import option_menu
from deep_translator import GoogleTranslator
from langchain.vectorstores import Pinecone
import streamlit_authenticator as stauth
from yaml.loader import SafeLoader
from langchain.llms import OpenAI
from dotenv import load_dotenv
from langchain import OpenAI
from PyPDF2 import PdfReader
import streamlit as st
import langchain
load_dotenv()
import pinecone
import openai
import time
import yaml
import os
# Initialization
pinecone.init(api_key="db6b2a8c-d59e-48e1-8d5c-4c2704622937",environment="gcp-starter")
llm=OpenAI(model_name="gpt-3.5-turbo-instruct")
chain=load_qa_chain(llm,chain_type="stuff")
index_name="langchainvector"
# Home Page
def home():
st.title("This is my Home page")
# Login Page
def login():
st.title("Login page")
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
authenticator.login('Login', location = 'main')
if st.session_state["authentication_status"]:
st.title(f'Welcome *{st.session_state["name"]}*')
st.subheader('Click on the Chat to upload document and access AI chatbot')
user_name = st.session_state["name"]
parent = os.getcwd()
path = os.path.join(parent, user_name)
if not os.path.exists(path):
os.mkdir(path)
with st.sidebar:
authenticator.logout("Logout", "sidebar")
elif st.session_state["authentication_status"] is False:
st.error('Username/password is incorrect')
elif st.session_state["authentication_status"] is None:
st.warning('Please enter your username and password')
# Register Page
def register():
st.title("Register page")
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
if authenticator.register_user('Register user', preauthorization=False):
  st.success('User registered successfully')
with open('./config.yaml', 'a') as file:
yaml.dump(config, file, default_flow_style=False)
def forgot_pass():
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
username_forgot_pw, email, random_password = authenticator.forgot_password('Forgot password')
if username_forgot_pw:
  st.success(f'New random password is: {random_password}. Change it at your next login.')
elif username_forgot_pw == False:
st.error('Username not found')
with open('./config.yaml', 'w') as file:
yaml.dump(config, file, default_flow_style=False)
def change_pass():
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
if st.session_state["authentication_status"]:
if authenticator.reset_password(st.session_state["username"], 'Reset password'):
st.success('New password changed')
if not st.session_state["authentication_status"]:
st.subheader('You need to login to change the password')
with open('./config.yaml', 'w') as file:
yaml.dump(config, file, default_flow_style=False)
def update_profile():
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
if st.session_state["authentication_status"]:
if authenticator.update_user_details(st.session_state["username"], 'Update user details'):
st.success('Entries updated successfully')
if not st.session_state["authentication_status"]:
st.subheader('You need to login to update the profile')
with open('./config.yaml', 'a') as file:
yaml.dump(config, file, default_flow_style=False)
# Translatiton
def translate_text(text, source='auto', target='hi'):
return GoogleTranslator(source=source, target=target).translate(text)
# Extract document and create embeddings
def process_text():
text = ""
if not os.path.exists(st.session_state.txt_path):
os.mkdir(st.session_state.txt_path)
if st.session_state.doc_type == 'PDF':
for file in st.session_state.upload_folder:
pdfdata = PdfReader(file)
for page in pdfdata.pages:
text += page.extract_text()
else:
  for file in st.session_state.upload_folder:
for line in file:
text += str(line, encoding = 'utf-8')
 with open(st.session_state.txt_path + '/' + 'raw_text.txt', 'w') as file:
  file.write(text)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=100,
length_function=len
)
chunks = text_splitter.split_text(text)
embeddings = OpenAIEmbeddings()
st.info('Creating OpenAI embeddings with PINECONE.... Please wait', icon="ℹ️")
st.session_state.vector_db = Pinecone.from_texts(chunks,embeddings,index_name=index_name)
st.success('Embeddings generated... Start the conversations', icon="✅")
def query_answer(query):
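 # Retrieve the two most similar chunks from the vector store and answer with the "stuff" QA chain.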
docs = st.session_state.vector_db.similarity_search(query, k=2)
response = chain.run(input_documents=docs, question=query)
return response
def chatbox():
for message in st.session_state.messages:
with st.chat_message(message['role']):
st.markdown(message['content'])
if prompt := st.chat_input('Ask question about PDF content'):
st.session_state.messages.append({'role' : 'user', 'content' : prompt})
with st.chat_message('user'):
st.markdown(prompt)
with st.chat_message('assistant'):
message_placeholder = st.empty()
raw_prompt = translate_text(prompt, 'auto', 'en')
result = query_answer(prompt)
result2 = ""
for chunk in result.split():
result2 += chunk + " "
time.sleep(0.1)
message_placeholder.markdown(result2 + "▌")
st.session_state.messages.append({"role": "assistant", "content": result})
def about(key):
selection = st.session_state[key]
if selection == 'Home':
home()
if selection == 'Login':
login()
if selection == 'Register':
register()
if selection == 'Forgot Password':
forgot_pass()
def tasks():
st.write('Tasks')
def main():
if 'vector_db' not in st.session_state:
st.session_state.vector_db = None
if 'txt_path' not in st.session_state:
st.session_state.txt_path = None
if 'doc_type' not in st.session_state:
st.session_state.doc_type = None
if 'upload_folder' not in st.session_state:
st.session_state.upload_folder = None
if 'messages' not in st.session_state:
st.session_state.messages = []
st.session_state.txt_path = os.path.join(os.getcwd(), 'extract_text')
with st.sidebar:
  selected5 = option_menu(None, ["Home", "Login", "Register", 'Forgot Password'],
icons=['house', 'login', "register", 'gear'],
on_change=about, key='menu_5', orientation="vertical")
st.session_state.doc_type = st.selectbox('Document type', ('None','PDF','TXT', 'RST','MD'))
st.session_state.upload_folder = st.file_uploader('Upload files', type = ['pdf', 'txt', 'rst','md'], accept_multiple_files=True)
submitBtn = st.button('Submit')
if submitBtn:
process_text()
chatbox()
if __name__ == '__main__':
main()
| [
"langchain.vectorstores.Pinecone.from_texts",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.chains.question_answering.load_qa_chain",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.OpenAI"
] | [((560, 573), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (571, 573), False, 'from dotenv import load_dotenv\n'), ((656, 749), 'pinecone.init', 'pinecone.init', ([], {'api_key': '"""db6b2a8c-d59e-48e1-8d5c-4c2704622937"""', 'environment': '"""gcp-starter"""'}), "(api_key='db6b2a8c-d59e-48e1-8d5c-4c2704622937', environment=\n 'gcp-starter')\n", (669, 749), False, 'import pinecone\n'), ((748, 791), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (754, 791), False, 'from langchain import OpenAI\n'), ((798, 836), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (811, 836), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((895, 927), 'streamlit.title', 'st.title', (['"""This is my Home page"""'], {}), "('This is my Home page')\n", (903, 927), True, 'import streamlit as st\n'), ((960, 982), 'streamlit.title', 'st.title', (['"""Login page"""'], {}), "('Login page')\n", (968, 982), True, 'import streamlit as st\n'), ((1096, 1257), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (1115, 1257), True, 'import streamlit_authenticator as stauth\n'), ((2076, 2101), 'streamlit.title', 'st.title', (['"""Register page"""'], {}), "('Register page')\n", (2084, 2101), True, 'import streamlit as st\n'), ((2215, 2376), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (2234, 2376), True, 'import streamlit_authenticator as stauth\n'), ((2780, 2941), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (2799, 2941), True, 'import streamlit_authenticator as stauth\n'), ((3509, 3670), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (3528, 3670), True, 'import streamlit_authenticator as stauth\n'), ((4252, 4413), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (4271, 4413), True, 'import streamlit_authenticator as stauth\n'), ((5562, 5653), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 
'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)', 'length_function': 'len'}), '(chunk_size=1000, chunk_overlap=100,\n length_function=len)\n', (5592, 5653), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((5715, 5733), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5731, 5733), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5735, 5813), 'streamlit.info', 'st.info', (['"""Creating OpenAI embeddings with PINECONE.... Please wait"""'], {'icon': '"""ℹ️"""'}), "('Creating OpenAI embeddings with PINECONE.... Please wait', icon='ℹ️')\n", (5742, 5813), True, 'import streamlit as st\n'), ((5844, 5906), 'langchain.vectorstores.Pinecone.from_texts', 'Pinecone.from_texts', (['chunks', 'embeddings'], {'index_name': 'index_name'}), '(chunks, embeddings, index_name=index_name)\n', (5863, 5906), False, 'from langchain.vectorstores import Pinecone\n'), ((5906, 5977), 'streamlit.success', 'st.success', (['"""Embeddings generated... Start the conversations"""'], {'icon': '"""✅"""'}), "('Embeddings generated... Start the conversations', icon='✅')\n", (5916, 5977), True, 'import streamlit as st\n'), ((6014, 6070), 'streamlit.session_state.vector_db.similarity_search', 'st.session_state.vector_db.similarity_search', (['query'], {'k': '(2)'}), '(query, k=2)\n', (6058, 6070), True, 'import streamlit as st\n'), ((7087, 7104), 'streamlit.write', 'st.write', (['"""Tasks"""'], {}), "('Tasks')\n", (7095, 7104), True, 'import streamlit as st\n'), ((1040, 1074), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (1049, 1074), False, 'import yaml\n'), ((1404, 1453), 'streamlit.title', 'st.title', (['f"""Welcome *{st.session_state[\'name\']}*"""'], {}), '(f"Welcome *{st.session_state[\'name\']}*")\n', (1412, 1453), True, 'import streamlit as st\n'), ((1462, 1536), 'streamlit.subheader', 'st.subheader', (['"""Click on the Chat to upload document and access AI chatbot"""'], {}), "('Click on the Chat to upload document and access AI chatbot')\n", (1474, 1536), True, 'import streamlit as st\n'), ((1599, 1610), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1608, 1610), False, 'import os\n'), ((1626, 1657), 'os.path.join', 'os.path.join', (['parent', 'user_name'], {}), '(parent, user_name)\n', (1638, 1657), False, 'import os\n'), ((2159, 2193), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (2168, 2193), False, 'import yaml\n'), ((2498, 2542), 'streamlit.success', 'st.success', (['"""User registration successfully"""'], {}), "('User registration successfully')\n", (2508, 2542), True, 'import streamlit as st\n'), ((2596, 2645), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (2605, 2645), False, 'import yaml\n'), ((2724, 2758), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (2733, 2758), False, 'import yaml\n'), ((3111, 3199), 'streamlit.success', 'st.success', (['f"""New random password is : {random_password}.. Change it in next login"""'], {}), "(\n f'New random password is : {random_password}.. 
Change it in next login')\n", (3121, 3199), True, 'import streamlit as st\n'), ((3325, 3374), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (3334, 3374), False, 'import yaml\n'), ((3453, 3487), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (3462, 3487), False, 'import yaml\n'), ((3955, 4011), 'streamlit.subheader', 'st.subheader', (['"""You need to login to change the password"""'], {}), "('You need to login to change the password')\n", (3967, 4011), True, 'import streamlit as st\n'), ((4065, 4114), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (4074, 4114), False, 'import yaml\n'), ((4196, 4230), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (4205, 4230), False, 'import yaml\n'), ((4716, 4771), 'streamlit.subheader', 'st.subheader', (['"""You need to login to update the profile"""'], {}), "('You need to login to update the profile')\n", (4728, 4771), True, 'import streamlit as st\n'), ((4825, 4874), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (4834, 4874), False, 'import yaml\n'), ((5102, 5143), 'os.path.exists', 'os.path.exists', (['st.session_state.txt_path'], {}), '(st.session_state.txt_path)\n', (5116, 5143), False, 'import os\n'), ((5147, 5182), 'os.mkdir', 'os.mkdir', (['st.session_state.txt_path'], {}), '(st.session_state.txt_path)\n', (5155, 5182), False, 'import os\n'), ((6298, 6345), 'streamlit.chat_input', 'st.chat_input', (['"""Ask question about PDF content"""'], {}), "('Ask question about PDF content')\n", (6311, 6345), True, 'import streamlit as st\n'), ((6349, 6418), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (6381, 6418), True, 'import streamlit as st\n'), ((6780, 6854), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': result}"], {}), "({'role': 'assistant', 'content': result})\n", (6812, 6854), True, 'import streamlit as st\n'), ((7545, 7556), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7554, 7556), False, 'import os\n'), ((7606, 7784), 'streamlit_option_menu.option_menu', 'option_menu', (['None', "['Home', 'Login', 'Register', 'Forgot Passoword']"], {'icons': "['house', 'login', 'register', 'gear']", 'on_change': 'about', 'key': '"""menu_5"""', 'orientation': '"""vertical"""'}), "(None, ['Home', 'Login', 'Register', 'Forgot Passoword'], icons=\n ['house', 'login', 'register', 'gear'], on_change=about, key='menu_5',\n orientation='vertical')\n", (7617, 7784), False, 'from streamlit_option_menu import option_menu\n'), ((7856, 7922), 'streamlit.selectbox', 'st.selectbox', (['"""Document type"""', "('None', 'PDF', 'TXT', 'RST', 'MD')"], {}), "('Document type', ('None', 'PDF', 'TXT', 'RST', 'MD'))\n", (7868, 7922), True, 'import streamlit as st\n'), ((7955, 8053), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload files"""'], {'type': "['pdf', 'txt', 'rst', 'md']", 'accept_multiple_files': '(True)'}), "('Upload files', type=['pdf', 'txt', 'rst', 'md'],\n accept_multiple_files=True)\n", (7971, 8053), True, 'import streamlit as st\n'), ((8065, 8084), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (8074, 8084), 
True, 'import streamlit as st\n'), ((1673, 1693), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1687, 1693), False, 'import os\n'), ((1707, 1721), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1715, 1721), False, 'import os\n'), ((1873, 1915), 'streamlit.error', 'st.error', (['"""Username/password is incorrect"""'], {}), "('Username/password is incorrect')\n", (1881, 1915), True, 'import streamlit as st\n'), ((3241, 3271), 'streamlit.error', 'st.error', (['"""Username not found"""'], {}), "('Username not found')\n", (3249, 3271), True, 'import streamlit as st\n'), ((3858, 3892), 'streamlit.success', 'st.success', (['"""New password changed"""'], {}), "('New password changed')\n", (3868, 3892), True, 'import streamlit as st\n'), ((4611, 4653), 'streamlit.success', 'st.success', (['"""Entries updated successfully"""'], {}), "('Entries updated successfully')\n", (4621, 4653), True, 'import streamlit as st\n'), ((4957, 5003), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': 'source', 'target': 'target'}), '(source=source, target=target)\n', (4973, 5003), False, 'from deep_translator import GoogleTranslator\n'), ((5283, 5298), 'PyPDF2.PdfReader', 'PdfReader', (['file'], {}), '(file)\n', (5292, 5298), False, 'from PyPDF2 import PdfReader\n'), ((6215, 6247), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (6230, 6247), True, 'import streamlit as st\n'), ((6252, 6283), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (6263, 6283), True, 'import streamlit as st\n'), ((6428, 6451), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (6443, 6451), True, 'import streamlit as st\n'), ((6456, 6475), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6467, 6475), True, 'import streamlit as st\n'), ((6483, 6511), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6498, 6511), True, 'import streamlit as st\n'), ((6538, 6548), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6546, 6548), True, 'import streamlit as st\n'), ((1984, 2037), 'streamlit.warning', 'st.warning', (['"""Please enter your username and password"""'], {}), "('Please enter your username and password')\n", (1994, 2037), True, 'import streamlit as st\n'), ((6714, 6729), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6724, 6729), False, 'import time\n')] |
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
# invoking custom retriever
from redundant_filter_retriever import RedundantFilterRetriever
from dotenv import load_dotenv
import langchain
langchain.debug = True
load_dotenv()
# create our chat model
chat = ChatOpenAI()
embeddings = OpenAIEmbeddings()
# instance of chroma for similarity_search but not add contents to db
db = Chroma(
persist_directory="emb",
embedding_function=embeddings
)
# set RetrievalQA construct in langchain
# retriever -> object that take in string & return relevant docs
# call our custom retriever -> RedundantFilterRetriever instead of db.as_retriever()
retriever = RedundantFilterRetriever(
# pass in customized attributes -> embeddings & chroma
embeddings=embeddings,
chroma=db
)
# retriever = db.as_retriever()
chain = RetrievalQA.from_chain_type(
llm=chat,
retriever=retriever,
chain_type="stuff" # refine -> build an initial response, then give the LLM an opport. to update it with further context
# "map_reduce" -> build a summary of each doc, then feed each summary to final qn
# "stuff" -> take some context from the vector store & "stuff" it into the prompt
# "map_rerank" -> find relevant part of each doc & give it a score of how relevant it is
)
result = chain.run("What is an interesting fact about the English language")
print(result)
| [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.vectorstores.Chroma",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.chat_models.ChatOpenAI"
] | [((344, 357), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (355, 357), False, 'from dotenv import load_dotenv\n'), ((392, 404), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (402, 404), False, 'from langchain.chat_models import ChatOpenAI\n'), ((418, 436), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (434, 436), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((513, 575), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (519, 575), False, 'from langchain.vectorstores import Chroma\n'), ((792, 850), 'redundant_filter_retriever.RedundantFilterRetriever', 'RedundantFilterRetriever', ([], {'embeddings': 'embeddings', 'chroma': 'db'}), '(embeddings=embeddings, chroma=db)\n', (816, 850), False, 'from redundant_filter_retriever import RedundantFilterRetriever\n'), ((962, 1040), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (989, 1040), False, 'from langchain.chains import RetrievalQA\n')] |
import os
import logging
import pickle
import ssl
import dill
import langchain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI, GooglePalm
from langchain.chains import LLMChain, RetrievalQAWithSourcesChain, AnalyzeDocumentChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import TextLoader, UnstructuredURLLoader
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import StrOutputParser
from dotenv import load_dotenv
class Vectorizer():
llm = OpenAI(temperature=0.7, max_tokens=1024)
embeddings = OpenAIEmbeddings()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
def __init__(self, file_path: str):
self.file_path = os.path.join(os.getcwd(), 'vectors', f'{file_path[:-4]}.pkl')
def vector(self, split_docs: list, ) -> bool:
self.logger.info('docs: %s', len(split_docs))
# Using OpenAIEmbeddings models to provide further correlational data for our resulting vector for better semantic relationship identification
vector_index = FAISS.from_documents(split_docs, self.embeddings)
self.logger.info('Vector embedding created')
# Exclude SSLContext from pickling
dill._dill._reverse_typemap[type(ssl.create_default_context())] = None
with open(self.file_path, 'wb') as f:
dill.dump(vector_index, f)
self.logger.info('Vector index saved')
return True
def load_index(self):
if os.path.exists(self.file_path):
with open(self.file_path, 'rb') as f:
vector_index = dill.load(f)
self.logger.info('Vector index loaded')
return vector_index
else:
self.logger.info('Vector index not found at the provided file path')
return False
| [
"langchain.vectorstores.FAISS.from_documents",
"langchain.llms.OpenAI",
"langchain.embeddings.OpenAIEmbeddings"
] | [((670, 710), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)', 'max_tokens': '(1024)'}), '(temperature=0.7, max_tokens=1024)\n', (676, 710), False, 'from langchain.llms import OpenAI, GooglePalm\n'), ((728, 746), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (744, 746), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((751, 858), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (770, 858), False, 'import logging\n'), ((924, 951), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (941, 951), False, 'import logging\n'), ((1371, 1420), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['split_docs', 'self.embeddings'], {}), '(split_docs, self.embeddings)\n', (1391, 1420), False, 'from langchain.vectorstores import FAISS\n'), ((1821, 1851), 'os.path.exists', 'os.path.exists', (['self.file_path'], {}), '(self.file_path)\n', (1835, 1851), False, 'import os\n'), ((858, 884), 'logging.getLogger', 'logging.getLogger', (['"""httpx"""'], {}), "('httpx')\n", (875, 884), False, 'import logging\n'), ((1035, 1046), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1044, 1046), False, 'import os\n'), ((1672, 1698), 'dill.dump', 'dill.dump', (['vector_index', 'f'], {}), '(vector_index, f)\n', (1681, 1698), False, 'import dill\n'), ((1567, 1595), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (1593, 1595), False, 'import ssl\n'), ((1934, 1946), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (1943, 1946), False, 'import dill\n')] |
# imports
import os, shutil, json, re
import pathlib
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader
from langchain.document_loaders import UnstructuredURLLoader
from langchain.docstore.document import Document
from google.cloud import storage
import base64
import langchain.text_splitter as text_splitter
from dotenv import load_dotenv
import tempfile
import hashlib
from langchain.schema import Document
import logging
from my_llm.pubsub_manager import PubSubManager
import datetime
from .database import setup_database
from .database import delete_row_from_source
from .database import return_sources_last24
load_dotenv()
def contains_url(message_data):
url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
if url_pattern.search(message_data):
return True
else:
return False
def extract_urls(text):
url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
urls = url_pattern.findall(text)
return urls
# utility functions
def convert_to_txt(file_path):
file_dir, file_name = os.path.split(file_path)
file_base, file_ext = os.path.splitext(file_name)
txt_file = os.path.join(file_dir, f"{file_base}.txt")
shutil.copyfile(file_path, txt_file)
return txt_file
def compute_sha1_from_file(file_path):
with open(file_path, "rb") as file:
bytes = file.read()
readable_hash = hashlib.sha1(bytes).hexdigest()
return readable_hash
def compute_sha1_from_content(content):
readable_hash = hashlib.sha1(content).hexdigest()
return readable_hash
def add_file_to_gcs(filename: str, vector_name:str, bucket_name: str=None, metadata:dict=None):
storage_client = storage.Client()
bucket_name = bucket_name if bucket_name is not None else os.getenv('GCS_BUCKET', None)
if bucket_name is None:
raise ValueError("No bucket found to upload to: GCS_BUCKET returned None")
if bucket_name.startswith("gs://"):
bucket_name = bucket_name.removeprefix("gs://")
logging.info(f"Bucket_name: {bucket_name}")
bucket = storage_client.get_bucket(bucket_name)
now = datetime.datetime.now()
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
hour = now.strftime("%H")
bucket_filepath = f"{vector_name}/{year}/{month}/{day}/{hour}/{os.path.basename(filename)}"
blob = bucket.blob(bucket_filepath)
the_metadata = {
"vector_name": vector_name,
}
if metadata is not None:
the_metadata.update(metadata)
blob.metadata = the_metadata
#TODO: create cloud storage pubsub subscription?
blob.upload_from_filename(filename)
logging.info(f"File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}")
# create pubsub topic and subscription if necessary to receive notifications from cloud storage
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}")
sub_name = f"pubsub_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_to_store/{vector_name}")
setup_database(vector_name)
return f"gs://{bucket_name}/{bucket_filepath}"
def read_url_to_document(url: str, metadata: dict = None):
loader = UnstructuredURLLoader(urls=[url])
docs = loader.load()
if metadata is not None:
for doc in docs:
doc.metadata.update(metadata)
logging.info(f"UnstructuredURLLoader docs: {docs}")
return docs
def read_file_to_document(gs_file: pathlib.Path, split=False, metadata: dict = None):
#file_sha1 = compute_sha1_from_file(gs_file.name)
try:
#TODO: Use UnstructuredAPIFileLoader instead?
logging.info(f"Sending {gs_file} to UnstructuredAPIFileLoader")
loader = UnstructuredAPIFileLoader(gs_file, mode="elements", api_key="FAKE_API_KEY")
if split:
# only supported for some file types
docs = loader.load_and_split()
else:
docs = loader.load()
logging.info(f"Loaded docs for {gs_file} from UnstructuredAPIFileLoader")
except ValueError as e:
logging.info(f"Error for {gs_file} from UnstructuredAPIFileLoader: {str(e)}")
if "file type is not supported in partition" in str(e):
logging.info("trying locally via .txt conversion")
txt_file = None
try:
# Convert the file to .txt and try again
txt_file = convert_to_txt(gs_file)
loader = UnstructuredFileLoader(txt_file, mode="elements")
if split:
docs = loader.load_and_split()
else:
docs = loader.load()
except Exception as inner_e:
raise Exception("An error occurred during txt conversion or loading.") from inner_e
finally:
# Ensure cleanup happens if txt_file was created
if txt_file is not None and os.path.exists(txt_file):
os.remove(txt_file)
else:
raise
except Exception as e:
logging.error(f"An unexpected error occurred for {gs_file}: {str(e)}")
raise
for doc in docs:
#doc.metadata["file_sha1"] = file_sha1
logging.info(f"doc_content: {doc.page_content[:30]}")
if metadata is not None:
doc.metadata.update(metadata)
logging.info(f"gs_file: {gs_file} turned into {len(docs)} documents")
return docs
def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0):
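    # Use a language-aware splitter for .py and .md files; otherwise fall back to the generic recursive splitter.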
if extension == ".py":
return text_splitter.PythonCodeTextSplitter()
elif extension == ".md":
return text_splitter.MarkdownTextSplitter()
return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
def remove_whitespace(page_content: str):
return page_content.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace(" ", " ")
def chunk_doc_to_docs(documents: list, extension: str = ".md"):
"""Turns a Document object into a list of many Document chunks"""
source_chunks = []
for document in documents:
splitter = choose_splitter(extension)
for chunk in splitter.split_text(remove_whitespace(document.page_content)):
source_chunks.append(Document(page_content=chunk, metadata=document.metadata))
return source_chunks
def data_to_embed_pubsub(data: dict, vector_name:str="documents"):
"""Triggered from a message on a Cloud Pub/Sub topic.
Args:
data JSON
"""
#hash = data['message']['data']
message_data = base64.b64decode(data['message']['data']).decode('utf-8')
attributes = data['message'].get('attributes', {})
messageId = data['message'].get('messageId')
publishTime = data['message'].get('publishTime')
logging.info(f"data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}")
logging.info(f"data_to_embed_pubsub data: {message_data}")
# pubsub from a Google Cloud Storage push topic
if attributes.get("eventType", None) is not None and attributes.get("payloadFormat", None) is not None:
eventType = attributes.get("eventType")
payloadFormat = attributes.get("payloadFormat")
if eventType == "OBJECT_FINALIZE" and payloadFormat == "JSON_API_V1":
logging.info("Got valid event from Google Cloud Storage")
the_object = attributes.get("objectId", None)
if the_object is None:
logging.info("No object found")
return attributes
if the_object.endswith("/"):
logging.info("GCS object is a directory only")
return attributes
# https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations
message_data = 'gs://' + attributes.get("bucketId") + '/' + the_object
if '/' in the_object:
bucket_vector_name = the_object.split('/')[0]
if len(bucket_vector_name) > 0 and vector_name != bucket_vector_name:
logging.info(f"Overwriting vector_name {vector_name} with {bucket_vector_name}")
vector_name = bucket_vector_name
attributes["attrs"] = f"namespace:{vector_name}"
logging.info(f"Constructed message_data: {message_data}")
metadata = attributes
logging.info(f"Found metadata in pubsub: {metadata}")
chunks = []
if message_data.startswith('"gs://'):
message_data = message_data.strip('\"')
if message_data.startswith("gs://"):
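        # The message is a Cloud Storage URI: download the object to a temp file, load it and split it into chunks.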
logging.info("Detected gs://")
bucket_name, file_name = message_data[5:].split("/", 1)
# Create a client
storage_client = storage.Client()
# Download the file from GCS
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(file_name)
file_name=pathlib.Path(file_name)
with tempfile.TemporaryDirectory() as temp_dir:
tmp_file_path = os.path.join(temp_dir, file_name.name)
blob.download_to_filename(tmp_file_path)
the_metadata = {
"source": message_data,
"type": "file_load_gcs",
"bucket_name": bucket_name
}
metadata.update(the_metadata)
docs = read_file_to_document(tmp_file_path, metadata=metadata)
chunks = chunk_doc_to_docs(docs, file_name.suffix)
logging.info(f"Split {file_name} into {len(chunks)} chunks")
elif message_data.startswith("http"):
logging.info(f"Got http message: {message_data}")
# just in case, extract the URL again
urls = extract_urls(message_data)
docs = []
for url in urls:
metadata["source"] = url
metadata["url"] = url
metadata["type"] = "url_load"
doc = read_url_to_document(url, metadata=metadata)
docs.extend(doc)
chunks = chunk_doc_to_docs(docs)
logging.info(f"Split {url} into {len(chunks)} chunks")
else:
logging.info("No gs:// detected")
the_json = json.loads(message_data)
the_metadata = the_json.get("metadata", {})
metadata.update(the_metadata)
the_content = the_json.get("page_content", None)
if metadata.get("source", None) is not None:
metadata["source"] = "No source embedded"
if the_content is None:
logging.info("No content found")
return {"metadata": "No content found"}
docs = [Document(page_content=the_content, metadata=metadata)]
publish_if_urls(the_content, vector_name)
chunks = chunk_doc_to_docs(docs)
logging.info(f"Split content into {len(chunks)} chunks")
publish_chunks(chunks, vector_name=vector_name)
logging.info(f"data_to_embed_pubsub published chunks with metadata: {metadata}")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"pubsub_state_messages")
pubsub_manager.publish_message(f"pubsub_chunk - Added doc with metadata: {metadata} to {vector_name}")
return metadata
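# Illustrative sketch (an assumption, not from the original source) of the Pub/Sub push
# envelope this function expects; the bucket and file names are made up:
#
#   import base64
#   example_event = {
#       "message": {
#           "data": base64.b64encode(b"gs://my-bucket/report.pdf").decode("utf-8"),
#           "attributes": {},
#           "messageId": "1234567890",
#           "publishTime": "2023-01-01T00:00:00Z",
#       }
#   }
#   metadata = data_to_embed_pubsub(example_event, vector_name="documents")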
def publish_if_urls(the_content, vector_name):
"""
Extracts URLs and puts them in a queue for processing on PubSub
"""
if contains_url(the_content):
logging.info("Detected http://")
urls = extract_urls(the_content)
for url in urls:
publish_text(url, vector_name)
def publish_chunks(chunks: list[Document], vector_name: str):
logging.info("Publishing chunks to embed_chunk")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"embed_chunk_{vector_name}")
sub_name = f"pubsub_chunk_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_chunk_to_store/{vector_name}")
setup_database(vector_name)
for chunk in chunks:
# Convert chunk to string, as Pub/Sub messages must be strings or bytes
chunk_str = chunk.json()
pubsub_manager.publish_message(chunk_str)
def publish_text(text:str, vector_name: str):
logging.info(f"Publishing text to app_to_pubsub_{vector_name}")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}")
sub_name = f"pubsub_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_chunk_to_store/{vector_name}")
setup_database(vector_name)
pubsub_manager.publish_message(text)
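# Illustrative usage (a sketch; the bucket and URL are made-up examples): queue a GCS
# file or a web page for ingestion into the given namespace.
#
#   publish_text("gs://my-bucket/report.pdf", vector_name="documents")
#   publish_text("https://example.com/page.html", vector_name="documents")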
def delete_source(source:str, vector_name:str):
logging.info(f"Deleting source: {source} from {vector_name}")
delete_row_from_source(source, vector_name)
logging.info(f"Deleted source: {source} from {vector_name}")
def return_sources_last24_(vector_name:str):
logging.info(f"Returning sources last 24")
rows = return_sources_last24(vector_name)
return rows
| [
"langchain.document_loaders.unstructured.UnstructuredFileLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.text_splitter.PythonCodeTextSplitter",
"langchain.document_loaders.unstructured.UnstructuredAPIFileLoader",
"langchain.document_loaders.UnstructuredURLLoader",
"langchain.text_splitter.MarkdownTextSplitter",
"langchain.schema.Document"
] | [((719, 732), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (730, 732), False, 'from dotenv import load_dotenv\n'), ((784, 892), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (794, 892), False, 'import os, shutil, json, re\n'), ((1015, 1123), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (1025, 1123), False, 'import os, shutil, json, re\n'), ((1242, 1266), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1255, 1266), False, 'import os, shutil, json, re\n'), ((1293, 1320), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1309, 1320), False, 'import os, shutil, json, re\n'), ((1336, 1378), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (1348, 1378), False, 'import os, shutil, json, re\n'), ((1383, 1419), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (1398, 1419), False, 'import os, shutil, json, re\n'), ((1869, 1885), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (1883, 1885), False, 'from google.cloud import storage\n'), ((2200, 2243), 'logging.info', 'logging.info', (['f"""Bucket_name: {bucket_name}"""'], {}), "(f'Bucket_name: {bucket_name}')\n", (2212, 2243), False, 'import logging\n'), ((2306, 2329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2327, 2329), False, 'import datetime\n'), ((2853, 2939), 'logging.info', 'logging.info', (['f"""File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}"""'], {}), "(\n f'File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}')\n", (2865, 2939), False, 'import logging\n'), ((3058, 3129), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (3071, 3129), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((3584, 3617), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': '[url]'}), '(urls=[url])\n', (3605, 3617), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((3748, 3799), 'logging.info', 'logging.info', (['f"""UnstructuredURLLoader docs: {docs}"""'], {}), "(f'UnstructuredURLLoader docs: {docs}')\n", (3760, 3799), False, 'import logging\n'), ((6112, 6212), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (6156, 6212), True, 'import langchain.text_splitter as text_splitter\n'), ((7229, 7342), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}"""'], {}), "(\n f'data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}'\n )\n", (7241, 7342), False, 'import logging\n'), ((7337, 7395), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub data: {message_data}"""'], {}), "(f'data_to_embed_pubsub data: 
{message_data}')\n", (7349, 7395), False, 'import logging\n'), ((8835, 8888), 'logging.info', 'logging.info', (['f"""Found metadata in pubsub: {metadata}"""'], {}), "(f'Found metadata in pubsub: {metadata}')\n", (8847, 8888), False, 'import logging\n'), ((11324, 11409), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub published chunks with metadata: {metadata}"""'], {}), "(f'data_to_embed_pubsub published chunks with metadata: {metadata}'\n )\n", (11336, 11409), False, 'import logging\n'), ((11426, 11491), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""pubsub_state_messages"""'}), "(vector_name, pubsub_topic=f'pubsub_state_messages')\n", (11439, 11491), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12019, 12067), 'logging.info', 'logging.info', (['"""Publishing chunks to embed_chunk"""'], {}), "('Publishing chunks to embed_chunk')\n", (12031, 12067), False, 'import logging\n'), ((12094, 12163), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""embed_chunk_{vector_name}"""'}), "(vector_name, pubsub_topic=f'embed_chunk_{vector_name}')\n", (12107, 12163), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12749, 12812), 'logging.info', 'logging.info', (['f"""Publishing text to app_to_pubsub_{vector_name}"""'], {}), "(f'Publishing text to app_to_pubsub_{vector_name}')\n", (12761, 12812), False, 'import logging\n'), ((12834, 12905), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (12847, 12905), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((13331, 13392), 'logging.info', 'logging.info', (['f"""Deleting source: {source} from {vector_name}"""'], {}), "(f'Deleting source: {source} from {vector_name}')\n", (13343, 13392), False, 'import logging\n'), ((13445, 13505), 'logging.info', 'logging.info', (['f"""Deleted source: {source} from {vector_name}"""'], {}), "(f'Deleted source: {source} from {vector_name}')\n", (13457, 13505), False, 'import logging\n'), ((13557, 13599), 'logging.info', 'logging.info', (['f"""Returning sources last 24"""'], {}), "(f'Returning sources last 24')\n", (13569, 13599), False, 'import logging\n'), ((1949, 1978), 'os.getenv', 'os.getenv', (['"""GCS_BUCKET"""', 'None'], {}), "('GCS_BUCKET', None)\n", (1958, 1978), False, 'import os, shutil, json, re\n'), ((4044, 4107), 'logging.info', 'logging.info', (['f"""Sending {gs_file} to UnstructuredAPIFileLoader"""'], {}), "(f'Sending {gs_file} to UnstructuredAPIFileLoader')\n", (4056, 4107), False, 'import logging\n'), ((4125, 4200), 'langchain.document_loaders.unstructured.UnstructuredAPIFileLoader', 'UnstructuredAPIFileLoader', (['gs_file'], {'mode': '"""elements"""', 'api_key': '"""FAKE_API_KEY"""'}), "(gs_file, mode='elements', api_key='FAKE_API_KEY')\n", (4150, 4200), False, 'from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader\n'), ((5632, 5685), 'logging.info', 'logging.info', (['f"""doc_content: {doc.page_content[:30]}"""'], {}), "(f'doc_content: {doc.page_content[:30]}')\n", (5644, 5685), False, 'import logging\n'), ((5976, 6014), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (6012, 6014), True, 'import langchain.text_splitter as text_splitter\n'), ((9047, 9077), 'logging.info', 'logging.info', (['"""Detected gs://"""'], {}), "('Detected 
gs://')\n", (9059, 9077), False, 'import logging\n'), ((9194, 9210), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (9208, 9210), False, 'from google.cloud import storage\n'), ((9362, 9385), 'pathlib.Path', 'pathlib.Path', (['file_name'], {}), '(file_name)\n', (9374, 9385), False, 'import pathlib\n'), ((11795, 11827), 'logging.info', 'logging.info', (['"""Detected http://"""'], {}), "('Detected http://')\n", (11807, 11827), False, 'import logging\n'), ((1691, 1712), 'hashlib.sha1', 'hashlib.sha1', (['content'], {}), '(content)\n', (1703, 1712), False, 'import hashlib\n'), ((2518, 2544), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2534, 2544), False, 'import os, shutil, json, re\n'), ((4379, 4452), 'logging.info', 'logging.info', (['f"""Loaded docs for {gs_file} from UnstructuredAPIFileLoader"""'], {}), "(f'Loaded docs for {gs_file} from UnstructuredAPIFileLoader')\n", (4391, 4452), False, 'import logging\n'), ((6059, 6095), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (6093, 6095), True, 'import langchain.text_splitter as text_splitter\n'), ((7009, 7050), 'base64.b64decode', 'base64.b64decode', (["data['message']['data']"], {}), "(data['message']['data'])\n", (7025, 7050), False, 'import base64\n'), ((7751, 7808), 'logging.info', 'logging.info', (['"""Got valid event from Google Cloud Storage"""'], {}), "('Got valid event from Google Cloud Storage')\n", (7763, 7808), False, 'import logging\n'), ((8741, 8798), 'logging.info', 'logging.info', (['f"""Constructed message_data: {message_data}"""'], {}), "(f'Constructed message_data: {message_data}')\n", (8753, 8798), False, 'import logging\n'), ((9400, 9429), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9427, 9429), False, 'import tempfile\n'), ((9471, 9509), 'os.path.join', 'os.path.join', (['temp_dir', 'file_name.name'], {}), '(temp_dir, file_name.name)\n', (9483, 9509), False, 'import os, shutil, json, re\n'), ((10036, 10085), 'logging.info', 'logging.info', (['f"""Got http message: {message_data}"""'], {}), "(f'Got http message: {message_data}')\n", (10048, 10085), False, 'import logging\n'), ((10548, 10581), 'logging.info', 'logging.info', (['"""No gs:// detected"""'], {}), "('No gs:// detected')\n", (10560, 10581), False, 'import logging\n'), ((10610, 10634), 'json.loads', 'json.loads', (['message_data'], {}), '(message_data)\n', (10620, 10634), False, 'import os, shutil, json, re\n'), ((1573, 1592), 'hashlib.sha1', 'hashlib.sha1', (['bytes'], {}), '(bytes)\n', (1585, 1592), False, 'import hashlib\n'), ((4643, 4693), 'logging.info', 'logging.info', (['"""trying locally via .txt conversion"""'], {}), "('trying locally via .txt conversion')\n", (4655, 4693), False, 'import logging\n'), ((6705, 6761), 'langchain.schema.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (6713, 6761), False, 'from langchain.schema import Document\n'), ((7919, 7950), 'logging.info', 'logging.info', (['"""No object found"""'], {}), "('No object found')\n", (7931, 7950), False, 'import logging\n'), ((8055, 8101), 'logging.info', 'logging.info', (['"""GCS object is a directory only"""'], {}), "('GCS object is a directory only')\n", (8067, 8101), False, 'import logging\n'), ((10935, 10967), 'logging.info', 'logging.info', (['"""No content found"""'], {}), "('No content found')\n", (10947, 10967), False, 'import logging\n'), 
((11045, 11098), 'langchain.schema.Document', 'Document', ([], {'page_content': 'the_content', 'metadata': 'metadata'}), '(page_content=the_content, metadata=metadata)\n', (11053, 11098), False, 'from langchain.schema import Document\n'), ((4872, 4921), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {'mode': '"""elements"""'}), "(txt_file, mode='elements')\n", (4894, 4921), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((8533, 8618), 'logging.info', 'logging.info', (['f"""Overwriting vector_name {vector_name} with {bucket_vector_name}"""'], {}), "(f'Overwriting vector_name {vector_name} with {bucket_vector_name}'\n )\n", (8545, 8618), False, 'import logging\n'), ((5335, 5359), 'os.path.exists', 'os.path.exists', (['txt_file'], {}), '(txt_file)\n', (5349, 5359), False, 'import os, shutil, json, re\n'), ((5381, 5400), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (5390, 5400), False, 'import os, shutil, json, re\n')] |
from langchain.llms import LlamaCpp
from langchain.chat_models import ChatOpenAI
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.cache import SQLiteCache
import langchain
import itertools
from utils import setup_logger
from dotenv import load_dotenv
import os
# Load the .env file
load_dotenv()
OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
os.environ["OPENAI_API_KEY"] = OPEN_AI_KEY
logger = setup_logger('contr_detector_logger', 'app.log')
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm_llama = LlamaCpp(
# model_path="llama-2-7b.Q4_K_M.gguf",
model_path="models/OpenOrca-Platypus2-13B-Q4_K_M.gguf",
temperature=0,
max_tokens=1000,
top_p=3,
callback_manager=callback_manager,
verbose=True, # Verbose is required to pass to the callback manager
)
# TODO: move the prompt to a file to be configured
prompt_template = """
Statement 1: {doc1}
Statement 2: {doc2}
Question: Are these two statements contradictory? Answer "yes" or "no".
"""
prompt = PromptTemplate.from_template(prompt_template)
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=OPEN_AI_KEY)
llm_chain = LLMChain(llm=llm, prompt=prompt)
def detect_contradictions(documents, metadatas, model_type: str):
contrs = []
for doc1, doc2 in itertools.combinations(zip(documents, metadatas), 2):
# print(doc1)
doc1, meta1 = doc1
doc2, meta2 = doc2
if model_type == "openAI":
llm = llm_chain
result = llm_chain({"doc1": doc1, "doc2": doc2}, return_only_outputs=True)
print(result)
if "yes" in result['text'].lower():
logger.info(f"Contradiction: {doc1} {doc2}")
print(f"Contradiction: {doc1} {doc2}")
contrs.append(((doc1, meta1), (doc2, meta2)))
# break # TODO: remove
else:
logger.info(f"No contradiction: {doc1} {doc2}")
print(f"No contradiction: {doc1} {doc2}")
else:
llm = llm_llama
prompt = f"""
Statement 1: {doc1}
Statement 2: {doc2}
Question: Are these two statements contradictory? Answer "yes" or "no".
"""
if "yes" in llm(prompt).lower():
logger.info(f"Contradiction: {doc1} {doc2}")
print(f"Contradiction: {doc1} {doc2}")
contrs.append(((doc1, meta1), (doc2, meta2)))
else:
logger.info(f"No contradiction: {doc1} {doc2}")
print(f"No contradiction: {doc1} {doc2}")
print("Done with checking for contradictions")
print(contrs)
return contrs | [
"langchain.chains.llm.LLMChain",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.PromptTemplate.from_template",
"langchain.cache.SQLiteCache",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.llms.LlamaCpp"
] | [((476, 489), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (487, 489), False, 'from dotenv import load_dotenv\n'), ((505, 529), 'os.getenv', 'os.getenv', (['"""OPEN_AI_KEY"""'], {}), "('OPEN_AI_KEY')\n", (514, 529), False, 'import os\n'), ((584, 632), 'utils.setup_logger', 'setup_logger', (['"""contr_detector_logger"""', '"""app.log"""'], {}), "('contr_detector_logger', 'app.log')\n", (596, 632), False, 'from utils import setup_logger\n'), ((655, 697), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (666, 697), False, 'from langchain.cache import SQLiteCache\n'), ((783, 946), 'langchain.llms.LlamaCpp', 'LlamaCpp', ([], {'model_path': '"""models/OpenOrca-Platypus2-13B-Q4_K_M.gguf"""', 'temperature': '(0)', 'max_tokens': '(1000)', 'top_p': '(3)', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), "(model_path='models/OpenOrca-Platypus2-13B-Q4_K_M.gguf',\n temperature=0, max_tokens=1000, top_p=3, callback_manager=\n callback_manager, verbose=True)\n", (791, 946), False, 'from langchain.llms import LlamaCpp\n'), ((1340, 1385), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (1368, 1385), False, 'from langchain.prompts import PromptTemplate\n'), ((1393, 1479), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'OPEN_AI_KEY'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=\n OPEN_AI_KEY)\n", (1403, 1479), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1487, 1519), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1495, 1519), False, 'from langchain.chains.llm import LLMChain\n'), ((736, 768), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (766, 768), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')] |
import streamlit as st
import torch
from transformers import (
AutoTokenizer, AutoModelForCausalLM,
BitsAndBytesConfig,
TextStreamer,
)
import whisper
import os
############ config ############
# general config
whisper_model_names=["tiny", "base", "small", "medium", "large"]
data_root_path = os.path.join('.','data')
file_types = ['pdf','png','jpg','wav']
for filetype in file_types:
if not os.path.exists(os.path.join(data_root_path,filetype)):
os.makedirs(os.path.join(data_root_path,filetype))
# streamlit config
## Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = [{"role": "assistant", "content": "Type a message to start a conversation"}]
############ User Interface ############
# Title
st.title('LLAMA RAG Demo')
st.divider()
st.title('Model name and auth token')
# Configs
model_name = st.text_input('Enter your Hugging Face model name', value="meta-llama/Llama-2-7b-chat-hf")
auth_token = st.text_input('Enter your Hugging Face auth token', value="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")
system_prompt = st.text_area('Enter your system prompt', value="You are a helpful, respectful and honest assistant.")
whisper_model_name = st.selectbox('Select your whisper model',options=whisper_model_names)
use_cuda = st.checkbox('Use CUDA', value=True)
isfile = False
## File uploader
from streamlit import file_uploader
uploadedfile = file_uploader("Choose a \"PDF\" file (currently only PDF is supported)")
if uploadedfile is not None:
isfile = True
with open(os.path.join(data_root_path,'pdf',uploadedfile.name),"wb") as f:
f.write(uploadedfile.getbuffer())
st.success("File uploaded successfully : {}".format(uploadedfile.name))
st.divider()
############ function ############
def clear_chat_history():
st.session_state.messages = [{"role": "assistant", "content": "Type a message to start a conversation"}]
for file in os.listdir(os.path.join(data_root_path,'pdf')):
os.remove(os.path.join(data_root_path,'pdf',file))
st.button('Clear Chat History', on_click=clear_chat_history)
# Load Tokenizer and Model
@st.cache_resource
def get_tokenizer_model():
# Create tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir='./model/', token=auth_token)
# Create model
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(model_name,
cache_dir='./model/', token=auth_token,
quantization_config=quantization_config,
# rope_scaling={"type":"dynamic", "factor":2},
max_memory=f'{int(torch.cuda.mem_get_info()[0]/1024**3)-2}GB'
)
return tokenizer, model
# RAG engine
def get_rag_queryengine(_tokenizer, model, system_prompt):
from llama_index.prompts.prompts import SimpleInputPrompt
from llama_index.llms import HuggingFaceLLM
system_prompt_ = f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
query_wrapper_prompt = SimpleInputPrompt("{query_str} [/INST]")
llm = HuggingFaceLLM(context_window=4096,
max_new_tokens=256,
system_prompt=system_prompt_,
query_wrapper_prompt=query_wrapper_prompt,
model=model,
tokenizer=_tokenizer
)
# Create embeddings
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
embeddings=LangchainEmbedding(
HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
)
from llama_index import ServiceContext
from llama_index import set_global_service_context
service_context = ServiceContext.from_defaults(
chunk_size=1024,
llm=llm,
embed_model=embeddings
)
set_global_service_context(service_context)
from llama_index import VectorStoreIndex, download_loader
PyMuPDFReader = download_loader("PyMuPDFReader")
loader = PyMuPDFReader()
for file in os.listdir(os.path.join(data_root_path,'pdf')):
        # !!! This is not a good way to load data; it should be fixed later:
        # as written, only the last file in the folder ends up being loaded
documents = loader.load_data(file_path=os.path.join(data_root_path,'pdf',file), metadata=True)
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
return query_engine
# whisper
def whisper_stt(*,model, device, audio_path)->str:
# load model
# # model : model name of whisper. default is base
    # # device : compute device to run the model on. default is cpu
audio_model = whisper.load_model(model,device)
# stt - audio.wav
result = audio_model.transcribe(audio_path)
# return result : str list
return result["text"]
############ main ############
# Load Tokenizer and Model, RAG engine
tokenizer, model = get_tokenizer_model()
if isfile:
engine = get_rag_queryengine(tokenizer, model, system_prompt)
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
prompt = st.chat_input('User: ')
if prompt:
# update(append) chat history
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
# Here... text streamer does not work as well as I intended with streamlit
# I will try to fix this later
if st.session_state.messages[-1]["role"] == "user":
if isfile:
with st.chat_message("assistant"):
# model inference
output_text = engine.query(prompt)
placeholder = st.empty()
placeholder.markdown(output_text)
st.session_state.messages.append({"role": "assistant", "content": output_text})
else:
with st.chat_message("assistant"):
# model inference
output_text = "Please upload a file first"
placeholder = st.empty()
placeholder.markdown(output_text)
st.session_state.messages.append({"role": "assistant", "content": output_text})
| [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((306, 331), 'os.path.join', 'os.path.join', (['"""."""', '"""data"""'], {}), "('.', 'data')\n", (318, 331), False, 'import os\n'), ((772, 798), 'streamlit.title', 'st.title', (['"""LLAMA RAG Demo"""'], {}), "('LLAMA RAG Demo')\n", (780, 798), True, 'import streamlit as st\n'), ((799, 811), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (809, 811), True, 'import streamlit as st\n'), ((813, 850), 'streamlit.title', 'st.title', (['"""Model name and auth token"""'], {}), "('Model name and auth token')\n", (821, 850), True, 'import streamlit as st\n'), ((874, 969), 'streamlit.text_input', 'st.text_input', (['"""Enter your Hugging Face model name"""'], {'value': '"""meta-llama/Llama-2-7b-chat-hf"""'}), "('Enter your Hugging Face model name', value=\n 'meta-llama/Llama-2-7b-chat-hf')\n", (887, 969), True, 'import streamlit as st\n'), ((978, 1081), 'streamlit.text_input', 'st.text_input', (['"""Enter your Hugging Face auth token"""'], {'value': '"""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"""'}), "('Enter your Hugging Face auth token', value=\n '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')\n", (991, 1081), True, 'import streamlit as st\n'), ((1093, 1199), 'streamlit.text_area', 'st.text_area', (['"""Enter your system prompt"""'], {'value': '"""You are a helpful, respectful and honest assistant."""'}), "('Enter your system prompt', value=\n 'You are a helpful, respectful and honest assistant.')\n", (1105, 1199), True, 'import streamlit as st\n'), ((1216, 1286), 'streamlit.selectbox', 'st.selectbox', (['"""Select your whisper model"""'], {'options': 'whisper_model_names'}), "('Select your whisper model', options=whisper_model_names)\n", (1228, 1286), True, 'import streamlit as st\n'), ((1297, 1332), 'streamlit.checkbox', 'st.checkbox', (['"""Use CUDA"""'], {'value': '(True)'}), "('Use CUDA', value=True)\n", (1308, 1332), True, 'import streamlit as st\n'), ((1416, 1475), 'streamlit.file_uploader', 'file_uploader', (['"""Choose a "PDF" file (now support only pdf)"""'], {}), '(\'Choose a "PDF" file (now support only pdf)\')\n', (1429, 1475), False, 'from streamlit import file_uploader\n'), ((1722, 1734), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (1732, 1734), True, 'import streamlit as st\n'), ((2038, 2098), 'streamlit.button', 'st.button', (['"""Clear Chat History"""'], {'on_click': 'clear_chat_history'}), "('Clear Chat History', on_click=clear_chat_history)\n", (2047, 2098), True, 'import streamlit as st\n'), ((5259, 5282), 'streamlit.chat_input', 'st.chat_input', (['"""User: """'], {}), "('User: ')\n", (5272, 5282), True, 'import streamlit as st\n'), ((2212, 2298), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {'cache_dir': '"""./model/"""', 'token': 'auth_token'}), "(model_name, cache_dir='./model/', token=\n auth_token)\n", (2241, 2298), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer\n'), ((2339, 2445), 'transformers.BitsAndBytesConfig', 'BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_compute_dtype': 'torch.float16'}), "(load_in_4bit=True, bnb_4bit_quant_type='nf4',\n bnb_4bit_compute_dtype=torch.float16)\n", (2357, 2445), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer\n'), ((3074, 3114), 'llama_index.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""{query_str} [/INST]"""'], {}), "('{query_str} [/INST]')\n", (3091, 3114), False, 'from 
llama_index.prompts.prompts import SimpleInputPrompt\n'), ((3125, 3297), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': '(4096)', 'max_new_tokens': '(256)', 'system_prompt': 'system_prompt_', 'query_wrapper_prompt': 'query_wrapper_prompt', 'model': 'model', 'tokenizer': '_tokenizer'}), '(context_window=4096, max_new_tokens=256, system_prompt=\n system_prompt_, query_wrapper_prompt=query_wrapper_prompt, model=model,\n tokenizer=_tokenizer)\n', (3139, 3297), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((3786, 3864), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(1024)', 'llm': 'llm', 'embed_model': 'embeddings'}), '(chunk_size=1024, llm=llm, embed_model=embeddings)\n', (3814, 3864), False, 'from llama_index import ServiceContext\n'), ((3899, 3942), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (3925, 3942), False, 'from llama_index import set_global_service_context\n'), ((4026, 4058), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (4041, 4058), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((4407, 4449), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (4438, 4449), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((4720, 4753), 'whisper.load_model', 'whisper.load_model', (['model', 'device'], {}), '(model, device)\n', (4738, 4753), False, 'import whisper\n'), ((5332, 5401), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5364, 5401), True, 'import streamlit as st\n'), ((1937, 1972), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""'], {}), "(data_root_path, 'pdf')\n", (1949, 1972), False, 'import os\n'), ((3607, 3659), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (3628, 3659), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((4115, 4150), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""'], {}), "(data_root_path, 'pdf')\n", (4127, 4150), False, 'import os\n'), ((5175, 5207), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5190, 5207), True, 'import streamlit as st\n'), ((5217, 5248), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (5228, 5248), True, 'import streamlit as st\n'), ((5411, 5434), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5426, 5434), True, 'import streamlit as st\n'), ((5444, 5460), 'streamlit.write', 'st.write', (['prompt'], {}), '(prompt)\n', (5452, 5460), True, 'import streamlit as st\n'), ((5847, 5926), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output_text}"], {}), "({'role': 'assistant', 'content': output_text})\n", (5879, 5926), True, 'import streamlit as st\n'), ((6157, 6236), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output_text}"], {}), "({'role': 'assistant', 'content': output_text})\n", (6189, 6236), True, 'import streamlit as st\n'), ((424, 462), 
'os.path.join', 'os.path.join', (['data_root_path', 'filetype'], {}), '(data_root_path, filetype)\n', (436, 462), False, 'import os\n'), ((484, 522), 'os.path.join', 'os.path.join', (['data_root_path', 'filetype'], {}), '(data_root_path, filetype)\n', (496, 522), False, 'import os\n'), ((1539, 1593), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'uploadedfile.name'], {}), "(data_root_path, 'pdf', uploadedfile.name)\n", (1551, 1593), False, 'import os\n'), ((1992, 2033), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'file'], {}), "(data_root_path, 'pdf', file)\n", (2004, 2033), False, 'import os\n'), ((5648, 5676), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5663, 5676), True, 'import streamlit as st\n'), ((5782, 5792), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5790, 5792), True, 'import streamlit as st\n'), ((5951, 5979), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5966, 5979), True, 'import streamlit as st\n'), ((6092, 6102), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6100, 6102), True, 'import streamlit as st\n'), ((4339, 4380), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'file'], {}), "(data_root_path, 'pdf', file)\n", (4351, 4380), False, 'import os\n'), ((2716, 2741), 'torch.cuda.mem_get_info', 'torch.cuda.mem_get_info', ([], {}), '()\n', (2739, 2741), False, 'import torch\n')] |
import streamlit as st
import langchain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain import OpenAI, VectorDBQA
from langchain.chains import RetrievalQAWithSourcesChain
import PyPDF2
# This function goes through each uploaded PDF and extracts and returns a list of page texts.
def read_and_textify(files):
text_list = []
sources_list = []
for file in files:
pdfReader = PyPDF2.PdfReader(file)
#print("Page Number:", len(pdfReader.pages))
for i in range(len(pdfReader.pages)):
pageObj = pdfReader.pages[i]
text = pageObj.extract_text()
pageObj.clear()
text_list.append(text)
sources_list.append(file.name + "_page_"+str(i))
return [text_list,sources_list]
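# Illustrative sketch of the return shape (the file name below is a made-up example):
#
#   texts, sources = read_and_textify(uploaded_files)
#   # texts   -> ["page 1 text", "page 2 text", ...]
#   # sources -> ["report.pdf_page_0", "report.pdf_page_1", ...]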
st.set_page_config(layout="centered", page_title="Multidoc_QnA")
st.header("Multidoc_QnA")
st.write("---")
#file uploader
uploaded_files = st.file_uploader("Upload documents",accept_multiple_files=True, type=["txt","pdf"])
st.write("---")
if not uploaded_files:
st.info(f"""Upload files to analyse""")
elif uploaded_files:
st.write(str(len(uploaded_files)) + " document(s) loaded..")
textify_output = read_and_textify(uploaded_files)
documents = textify_output[0]
sources = textify_output[1]
#extract embeddings
embeddings = OpenAIEmbeddings(openai_api_key = st.secrets["openai_api_key"])
#vstore with metadata. Here we will store page numbers.
vStore = Chroma.from_texts(documents, embeddings, metadatas=[{"source": s} for s in sources])
#deciding model
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
retriever = vStore.as_retriever()
retriever.search_kwargs = {'k':2}
#initiate model
llm = OpenAI(model_name=model_name, openai_api_key = st.secrets["openai_api_key"], streaming=True)
model = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
st.header("Ask your data")
user_q = st.text_area("Enter your questions here")
if st.button("Get Response"):
try:
with st.spinner("Model is working on it..."):
result = model({"question":user_q}, return_only_outputs=True)
st.subheader('Your response:')
st.write(result['answer'])
st.subheader('Source pages:')
st.write(result['sources'])
except Exception as e:
st.error(f"An error occurred: {e}")
st.error('Oops, the GPT response resulted in an error :( Please try again with a different question.')
| [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.chains.RetrievalQAWithSourcesChain.from_chain_type",
"langchain.OpenAI",
"langchain.vectorstores.Chroma.from_texts"
] | [((868, 932), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""centered"""', 'page_title': '"""Multidoc_QnA"""'}), "(layout='centered', page_title='Multidoc_QnA')\n", (886, 932), True, 'import streamlit as st\n'), ((933, 958), 'streamlit.header', 'st.header', (['"""Multidoc_QnA"""'], {}), "('Multidoc_QnA')\n", (942, 958), True, 'import streamlit as st\n'), ((959, 974), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (967, 974), True, 'import streamlit as st\n'), ((1010, 1100), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload documents"""'], {'accept_multiple_files': '(True)', 'type': "['txt', 'pdf']"}), "('Upload documents', accept_multiple_files=True, type=[\n 'txt', 'pdf'])\n", (1026, 1100), True, 'import streamlit as st\n'), ((1094, 1109), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (1102, 1109), True, 'import streamlit as st\n'), ((1140, 1175), 'streamlit.info', 'st.info', (['f"""Upload files to analyse"""'], {}), "(f'Upload files to analyse')\n", (1147, 1175), True, 'import streamlit as st\n'), ((510, 532), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['file'], {}), '(file)\n', (526, 532), False, 'import PyPDF2\n'), ((1424, 1485), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': "st.secrets['openai_api_key']"}), "(openai_api_key=st.secrets['openai_api_key'])\n", (1440, 1485), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1557, 1645), 'langchain.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['documents', 'embeddings'], {'metadatas': "[{'source': s} for s in sources]"}), "(documents, embeddings, metadatas=[{'source': s} for s in\n sources])\n", (1574, 1645), False, 'from langchain.vectorstores import Chroma\n'), ((1816, 1910), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'model_name', 'openai_api_key': "st.secrets['openai_api_key']", 'streaming': '(True)'}), "(model_name=model_name, openai_api_key=st.secrets['openai_api_key'],\n streaming=True)\n", (1822, 1910), False, 'from langchain import OpenAI, VectorDBQA\n'), ((1919, 2016), 'langchain.chains.RetrievalQAWithSourcesChain.from_chain_type', 'RetrievalQAWithSourcesChain.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever'}), "(llm=llm, chain_type='stuff',\n retriever=retriever)\n", (1962, 2016), False, 'from langchain.chains import RetrievalQAWithSourcesChain\n'), ((2018, 2044), 'streamlit.header', 'st.header', (['"""Ask your data"""'], {}), "('Ask your data')\n", (2027, 2044), True, 'import streamlit as st\n'), ((2056, 2097), 'streamlit.text_area', 'st.text_area', (['"""Enter your questions here"""'], {}), "('Enter your questions here')\n", (2068, 2097), True, 'import streamlit as st\n'), ((2106, 2131), 'streamlit.button', 'st.button', (['"""Get Response"""'], {}), "('Get Response')\n", (2115, 2131), True, 'import streamlit as st\n'), ((2153, 2192), 'streamlit.spinner', 'st.spinner', (['"""Model is working on it..."""'], {}), "('Model is working on it...')\n", (2163, 2192), True, 'import streamlit as st\n'), ((2272, 2302), 'streamlit.subheader', 'st.subheader', (['"""Your response:"""'], {}), "('Your response:')\n", (2284, 2302), True, 'import streamlit as st\n'), ((2311, 2337), 'streamlit.write', 'st.write', (["result['answer']"], {}), "(result['answer'])\n", (2319, 2337), True, 'import streamlit as st\n'), ((2346, 2375), 'streamlit.subheader', 'st.subheader', (['"""Source pages:"""'], {}), "('Source pages:')\n", (2358, 2375), True, 'import 
streamlit as st\n'), ((2384, 2411), 'streamlit.write', 'st.write', (["result['sources']"], {}), "(result['sources'])\n", (2392, 2411), True, 'import streamlit as st\n'), ((2445, 2480), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (2453, 2480), True, 'import streamlit as st\n'), ((2487, 2599), 'streamlit.error', 'st.error', (['"""Oops, the GPT response resulted in an error :( Please try again with a different question."""'], {}), "(\n 'Oops, the GPT response resulted in an error :( Please try again with a different question.'\n )\n", (2495, 2599), True, 'import streamlit as st\n')] |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[
Optional[LangChainTracerV1]
] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
wandb_tracing_callback_var: ContextVar[
Optional[WandbTracer]
] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[
Optional[LangChainTracer]
] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get OpenAI callback handler in a context manager."""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
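# Illustrative usage (a sketch, assuming an OpenAI LLM is configured with a valid API
# key; the prompt is a made-up example):
#
#   from langchain.llms import OpenAI
#   llm = OpenAI()
#   with get_openai_callback() as cb:
#       llm("Tell me a joke")
#       print(cb.total_tokens, cb.total_cost)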
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get Tracer in a context manager."""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get WandbTracer in a context manager."""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
session_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
) -> Generator[None, None, None]:
"""Get the experimental tracer handler in a context manager."""
# Issue a warning that this is experimental
warnings.warn(
"The tracing v2 API is in development. "
"This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
example_id=example_id,
session_name=session_name,
)
tracing_v2_callback_var.set(cb)
yield
tracing_v2_callback_var.set(None)
@contextmanager
def trace_as_chain_group(
group_name: str,
*,
session_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
) -> Generator[CallbackManager, None, None]:
"""Get a callback manager for a chain group in a context manager."""
cb = LangChainTracer(
session_name=session_name,
example_id=example_id,
)
cm = CallbackManager.configure(
inheritable_callbacks=[cb],
)
run_manager = cm.on_chain_start({"name": group_name}, {})
yield run_manager.get_child()
run_manager.on_chain_end({})
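# Illustrative usage (a sketch; `chain` stands in for any LangChain chain configured
# elsewhere):
#
#   with trace_as_chain_group("my_group") as group_manager:
#       chain.run(some_input, callbacks=group_manager)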
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
*,
session_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
) -> AsyncGenerator[AsyncCallbackManager, None]:
"""Get a callback manager for a chain group in a context manager."""
cb = LangChainTracer(
session_name=session_name,
example_id=example_id,
)
cm = AsyncCallbackManager.configure(
inheritable_callbacks=[cb],
)
run_manager = await cm.on_chain_start({"name": group_name}, {})
try:
yield run_manager.get_child()
finally:
await run_manager.on_chain_end({})
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
if handler.raise_error:
raise e
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
if handler.raise_error:
raise e
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
) -> None:
"""Initialize run manager."""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations."""
return cls(uuid4(), [], [])
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running."""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that can be used to handle callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
# Re-use the LLM Run Manager since the outputs are treated
# the same for now
return CallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForChainRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForToolRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> CallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that can be used to handle callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
    ) -> Any:
        """Run when the chat model starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> AsyncCallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set."""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
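# For example, env_var_is_set("LANGCHAIN_TRACING") is False when the variable is
# unset or set to "", "0", "false", or "False", and True for any other value.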
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> T:
"""Configure the callback manager."""
callback_manager = callback_manager_cls([])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
tracer = tracing_callback_var.get()
wandb_tracer = wandb_tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING")
or tracer is not None
or env_var_is_set("LANGCHAIN_HANDLER")
)
wandb_tracing_enabled_ = (
env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None
)
tracer_session = os.environ.get("LANGCHAIN_SESSION")
debug = _get_debug()
if tracer_session is None:
tracer_session = "default"
if (
verbose
or debug
or tracing_enabled_
or tracing_v2_enabled_
or wandb_tracing_enabled_
or open_ai is not None
):
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_session)
callback_manager.add_handler(handler, True)
if wandb_tracing_enabled_ and not any(
isinstance(handler, WandbTracer) for handler in callback_manager.handlers
):
if wandb_tracer:
callback_manager.add_handler(wandb_tracer, True)
else:
handler = WandbTracer()
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(session_name=tracer_session)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager
| [
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.schema.get_buffer_string",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars import ContextVar\n'), ((1406, 1450), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1416, 1450), False, 'from contextvars import ContextVar\n'), ((1541, 1591), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_wandb_callback"""'], {'default': 'None'}), "('tracing_wandb_callback', default=None)\n", (1551, 1591), False, 'from contextvars import ContextVar\n'), ((1684, 1731), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1694, 1731), False, 'from contextvars import ContextVar\n'), ((7547, 7585), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (7554, 7585), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((24466, 24517), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (24473, 24517), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((1969, 1992), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (1990, 1992), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2243, 2262), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2260, 2262), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((2587, 2600), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (2598, 2600), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((2990, 3107), 'warnings.warn', 'warnings.warn', (['"""The tracing v2 API is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The tracing v2 API is in development. 
This is not yet stable and may change in the future.'\n )\n", (3003, 3107), False, 'import warnings\n'), ((3206, 3271), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'example_id': 'example_id', 'session_name': 'session_name'}), '(example_id=example_id, session_name=session_name)\n', (3221, 3271), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((3669, 3734), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'session_name', 'example_id': 'example_id'}), '(session_name=session_name, example_id=example_id)\n', (3684, 3734), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4272, 4337), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'session_name', 'example_id': 'example_id'}), '(session_name=session_name, example_id=example_id)\n', (4287, 4337), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((26539, 26574), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""'], {}), "('LANGCHAIN_SESSION')\n", (26553, 26574), False, 'import os\n'), ((3180, 3196), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (3184, 3196), False, 'from uuid import UUID, uuid4\n'), ((6199, 6233), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (6226, 6233), False, 'import asyncio\n'), ((8264, 8271), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (8269, 8271), False, 'from uuid import UUID, uuid4\n'), ((18037, 18044), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18042, 18044), False, 'from uuid import UUID, uuid4\n'), ((18744, 18751), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18749, 18751), False, 'from uuid import UUID, uuid4\n'), ((19547, 19554), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (19552, 19554), False, 'from uuid import UUID, uuid4\n'), ((20282, 20289), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20287, 20289), False, 'from uuid import UUID, uuid4\n'), ((21564, 21571), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21569, 21571), False, 'from uuid import UUID, uuid4\n'), ((22225, 22232), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22230, 22232), False, 'from uuid import UUID, uuid4\n'), ((22958, 22965), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22963, 22965), False, 'from uuid import UUID, uuid4\n'), ((23716, 23723), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (23721, 23723), False, 'from uuid import UUID, uuid4\n'), ((27319, 27343), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (27341, 27343), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((27633, 27652), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (27650, 27652), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((28048, 28061), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (28059, 28061), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((6564, 6584), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (6581, 6584), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((27096, 27119), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (27117, 27119), False, 'from langchain.callbacks.stdout import 
StdOutCallbackHandler\n'), ((28436, 28480), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'tracer_session'}), '(session_name=tracer_session)\n', (28451, 28480), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((6388, 6429), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (6405, 6429), False, 'import functools\n'), ((5291, 5311), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (5308, 5311), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((6320, 6344), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6342, 6344), False, 'import asyncio\n')] |
import langchain
from langchain.llms import VertexAI
from langchain.prompts import PromptTemplate, load_prompt
import wandb
from wandb.integration.langchain import WandbTracer
import streamlit as st
from google.oauth2 import service_account
# account_info = dict(st.secrets["GOOGLE_APPLICATION_CREDENTIALS"])
# credentials = service_account.Credentials.from_service_account_info(account_info)
def generate_prd_v3_palm(new_feature, new_feature_desc, wandb_name):
wandb.login(key=st.secrets["WANDB_API_KEY"])
wandb.init(
project="generate_prd_v3_palm",
config={
"model": "text-bison-001",
"temperature": 0.2
},
entity="arihantsheth",
name=wandb_name,
)
# llm = VertexAI(credentials=credentials, max_output_tokens=1024)
llm = VertexAI(project="synap-labs-390404", location="us-central1", credentials=dict(
st.secrets["GOOGLE_APPLICATION_CREDENTIALS"]), max_output_tokens=1024)
prompt_template = load_prompt("prompt_templates/generate_prd_template_v2.json") # For deployment
# prompt_template = load_prompt("../prompt_templates/generate_prd_template_v3.json") # For local testing
prompt = prompt_template.format(
new_feature=new_feature, new_feature_desc=new_feature_desc)
try:
output = llm(prompt, callbacks=[WandbTracer()])
except Exception as e:
print("GCP Authentication error")
print(e)
return
# with open(f"./generated_prds/{new_feature}_prd_v3_palm.md", "w") as f: # For deployment
# # with open(f"../generated_prds/{new_feature}_prd_palm.md", "w") as f: # For local testing
# f.write(output)
wandb.finish()
return output
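# Example invocation; the feature name, description, and W&B run name below are
# hypothetical placeholders:
# prd_markdown = generate_prd_v3_palm(
#     new_feature="Dark mode",
#     new_feature_desc="Let users switch the dashboard to a dark theme",
#     wandb_name="dark-mode-prd-run",
# )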
| [
"langchain.prompts.load_prompt"
] | [((469, 513), 'wandb.login', 'wandb.login', ([], {'key': "st.secrets['WANDB_API_KEY']"}), "(key=st.secrets['WANDB_API_KEY'])\n", (480, 513), False, 'import wandb\n'), ((519, 666), 'wandb.init', 'wandb.init', ([], {'project': '"""generate_prd_v3_palm"""', 'config': "{'model': 'text-bison-001', 'temperature': 0.2}", 'entity': '"""arihantsheth"""', 'name': 'wandb_name'}), "(project='generate_prd_v3_palm', config={'model':\n 'text-bison-001', 'temperature': 0.2}, entity='arihantsheth', name=\n wandb_name)\n", (529, 666), False, 'import wandb\n'), ((993, 1054), 'langchain.prompts.load_prompt', 'load_prompt', (['"""prompt_templates/generate_prd_template_v2.json"""'], {}), "('prompt_templates/generate_prd_template_v2.json')\n", (1004, 1054), False, 'from langchain.prompts import PromptTemplate, load_prompt\n'), ((1679, 1693), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (1691, 1693), False, 'import wandb\n'), ((1339, 1352), 'wandb.integration.langchain.WandbTracer', 'WandbTracer', ([], {}), '()\n', (1350, 1352), False, 'from wandb.integration.langchain import WandbTracer\n')] |
#!/usr/bin/env python
# coding: utf-8
# # LangChain: Agents
#
# ## Outline:
#
# * Using built in LangChain tools: DuckDuckGo search and Wikipedia
# * Defining your own tools
# In[ ]:
import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
import warnings
warnings.filterwarnings("ignore")
# Note: LLMs do not always produce the same results. When executing the code in your notebook, you may get slightly different answers than those in the video.
# In[ ]:
# account for deprecation of LLM model
import datetime
# Get the current date
current_date = datetime.datetime.now().date()
# Define the date after which the model should be set to "gpt-3.5-turbo"
target_date = datetime.date(2024, 6, 12)
# Set the model variable based on the current date
if current_date > target_date:
llm_model = "gpt-3.5-turbo"
else:
llm_model = "gpt-3.5-turbo-0301"
# ## Built-in LangChain tools
# In[ ]:
#!pip install -U wikipedia
# In[ ]:
from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools.python.tool import PythonREPLTool
from langchain.python import PythonREPL
from langchain.chat_models import ChatOpenAI
# In[ ]:
llm = ChatOpenAI(temperature=0, model=llm_model)
# In[ ]:
tools = load_tools(["llm-math","wikipedia"], llm=llm)
# In[ ]:
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    handle_parsing_errors=True,
    verbose=True)
# In[ ]:
agent("What is the 25% of 300?")
# ## Wikipedia example
# In[ ]:
question = "Tom M. Mitchell is an American computer scientist \
and the Founders University Professor at Carnegie Mellon University (CMU). \
What book did he write?"
result = agent(question)
# ## Python Agent
# In[ ]:
agent = create_python_agent(
llm,
tool=PythonREPLTool(),
verbose=True
)
# In[ ]:
customer_list = [["Harrison", "Chase"],
["Lang", "Chain"],
["Dolly", "Too"],
["Elle", "Elem"],
["Geoff","Fusion"],
["Trance","Former"],
["Jen","Ayai"]
]
# In[ ]:
agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
# #### View detailed outputs of the chains
# In[ ]:
import langchain
langchain.debug=True
agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
langchain.debug=False
# ## Define your own tool
# In[ ]:
#!pip install DateTime
# In[ ]:
from langchain.agents import tool
from datetime import date
# In[ ]:
@tool
def time(text: str) -> str:
"""Returns todays date, use this for any \
questions related to knowing todays date. \
The input should always be an empty string, \
and this function will always return todays \
date - any date mathmatics should occur \
outside this function."""
return str(date.today())
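# Quick sanity check outside the agent (hypothetical; the @tool decorator wraps the
# function in a tool object, so it would be invoked via .run()):
# print(time.run(""))  # prints today's date as a string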
# In[ ]:
agent = initialize_agent(
    tools + [time],
    llm,
    agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    handle_parsing_errors=True,
    verbose=True)
# **Note**:
#
# The agent will sometimes come to the wrong conclusion (agents are a work in progress!).
#
# If it does, please try running it again.
# In[ ]:
try:
result = agent("whats the date today?")
except:
print("exception on external access")
# Reminder: Download your notebook to your local computer to save your work.
| [
"langchain.tools.python.tool.PythonREPLTool",
"langchain.agents.load_tools",
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI"
] | [((315, 348), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (338, 348), False, 'import warnings\n'), ((735, 761), 'datetime.date', 'datetime.date', (['(2024)', '(6)', '(12)'], {}), '(2024, 6, 12)\n', (748, 761), False, 'import datetime\n'), ((1324, 1366), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'llm_model'}), '(temperature=0, model=llm_model)\n', (1334, 1366), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1388, 1434), 'langchain.agents.load_tools', 'load_tools', (["['llm-math', 'wikipedia']"], {'llm': 'llm'}), "(['llm-math', 'wikipedia'], llm=llm)\n", (1398, 1434), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((1454, 1579), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (1470, 1579), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((3139, 3273), 'langchain.agents.initialize_agent', 'initialize_agent', (['(tools + [time])', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools + [time], llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (3155, 3273), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((260, 273), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (271, 273), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((616, 639), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (637, 639), False, 'import datetime\n'), ((1952, 1968), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (1966, 1968), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((3105, 3117), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3115, 3117), False, 'from datetime import date\n')] |
import sys
import pandas as pd
from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex
from llama_index.indices.base import BaseIndex
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.vector_stores import SimpleVectorStore
from config import (
API_KEY,
DEPLOYMENT_NAME,
MODEL_NAME,
API_BASE,
API_VERSION,
EMBEDDING_MODEL,
EMBEDDING_DEPLOYMENT,
)
class LlamaQueryEngine:
def __init__(
self,
api_key=API_KEY,
deployment_name=DEPLOYMENT_NAME,
model_name=MODEL_NAME,
api_base=API_BASE,
api_version=API_VERSION,
embedding_model=EMBEDDING_MODEL,
embedding_deployment=EMBEDDING_DEPLOYMENT,
):
import openai
import logging
import os
from langchain.embeddings import OpenAIEmbeddings
from llama_index.llms import AzureOpenAI
from llama_index import LangchainEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
openai.api_type = "azure"
openai.api_base = api_base
openai.api_version = api_version
os.environ["OPENAI_API_KEY"] = api_key
openai.api_key = os.getenv("OPENAI_API_KEY")
llm = AzureOpenAI(
deployment_name=deployment_name,
model=model_name,
temperature=0,
engine="gpt35",
max_tokens=2048
)
embedding_llm = LangchainEmbedding(
OpenAIEmbeddings(
model=embedding_model,
deployment=embedding_deployment,
openai_api_key=openai.api_key,
openai_api_base=openai.api_base,
openai_api_type=openai.api_type,
openai_api_version=openai.api_version,
),
embed_batch_size=1,
)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embedding_llm,
)
set_global_service_context(service_context)
# index = VectorStoreIndex.from_documents(documents)
# self.index = index
# self.query_engine = index.as_query_engine()
self.index = None
self.query_engine = None
def load_doc_from_csv(self, csv_path, text_column="decoded_readme", max_docs=20, is_persist=False, has_persist=False, persist_dir="app/data/persist"):
if has_persist:
self.retrieve_index(persist_dir)
return
df = pd.read_csv(csv_path)
text_list = df[text_column].tolist()
text_list = text_list[:max_docs]
documents = [Document(text=t) for t in text_list]
index = VectorStoreIndex.from_documents(documents)
self.index = index
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.indices.vector_store import VectorIndexRetriever
from llama_index import get_response_synthesizer
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=2,
)
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=0.7)
]
)
self.query_engine = query_engine
# self.query_engine = index.as_query_engine()
if is_persist:
self.persist_index(persist_dir)
def retrieve_index(self, persist_dir):
storage_context = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=persist_dir),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=persist_dir),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=persist_dir),
)
self.index = load_index_from_storage(storage_context)
self.query_engine = self.index.as_query_engine()
def persist_index(self, persist_dir):
self.index.storage_context.persist(persist_dir=persist_dir)
def query(self, query_text):
if not self.query_engine:
raise Exception("No query engine loaded")
return self.query_engine.query(query_text)
def get_index(self):
return self.index
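# Illustrative usage; the CSV path and the query text below are hypothetical:
# engine = LlamaQueryEngine()
# engine.load_doc_from_csv("app/data/repos.csv", max_docs=5)
# print(engine.query("Which repository documents a data pipeline?"))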
| [
"langchain.embeddings.OpenAIEmbeddings"
] | [((1194, 1252), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1213, 1252), False, 'import logging\n'), ((1435, 1462), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1444, 1462), False, 'import os\n'), ((1478, 1593), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'deployment_name': 'deployment_name', 'model': 'model_name', 'temperature': '(0)', 'engine': '"""gpt35"""', 'max_tokens': '(2048)'}), "(deployment_name=deployment_name, model=model_name, temperature=\n 0, engine='gpt35', max_tokens=2048)\n", (1489, 1593), False, 'from llama_index.llms import AzureOpenAI\n'), ((2106, 2170), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embedding_llm'}), '(llm=llm, embed_model=embedding_llm)\n', (2134, 2170), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((2215, 2258), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2241, 2258), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((2719, 2740), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (2730, 2740), True, 'import pandas as pd\n'), ((2901, 2943), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2932, 2943), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((3298, 3351), 'llama_index.indices.vector_store.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(2)'}), '(index=index, similarity_top_k=2)\n', (3318, 3351), False, 'from llama_index.indices.vector_store import VectorIndexRetriever\n'), ((3460, 3486), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (3484, 3486), False, 'from llama_index import get_response_synthesizer\n'), ((4321, 4361), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4344, 4361), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((1716, 1932), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'embedding_model', 'deployment': 'embedding_deployment', 'openai_api_key': 'openai.api_key', 'openai_api_base': 'openai.api_base', 'openai_api_type': 'openai.api_type', 'openai_api_version': 'openai.api_version'}), '(model=embedding_model, deployment=embedding_deployment,\n openai_api_key=openai.api_key, openai_api_base=openai.api_base,\n openai_api_type=openai.api_type, openai_api_version=openai.api_version)\n', (1732, 1932), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2848, 2864), 'llama_index.Document', 'Document', ([], {'text': 't'}), '(text=t)\n', (2856, 2864), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((4057, 4118), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4093, 4118), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((4145, 4204), 
'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4179, 4204), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((4230, 4288), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4263, 4288), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((3703, 3749), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.7)'}), '(similarity_cutoff=0.7)\n', (3726, 3749), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n')] |
import arxiv
import openai
import langchain
import pinecone
from langchain_community.document_loaders import ArxivLoader
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.llms import OpenAI
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.question_answering import load_qa_chain
from langchain import OpenAI
from utils import *
import streamlit as st
import os
from dotenv import load_dotenv
load_dotenv()
openai_api_key = os.getenv('OPENAI_API_KEY')
pinecone_api_key = os.getenv('PINECONE_API_KEY')
environment = os.getenv('PINECONE_ENV')
llm_summary = ChatOpenAI(temperature=0.3, model_name="gpt-3.5-turbo-0125")
llm = OpenAI(model_name="gpt-3.5-turbo-0125", temperature=0.6, api_key=openai_api_key)
if 'summary' not in st.session_state:
st.session_state.summary = None
if 'documents' not in st.session_state:
st.session_state.documents = None
st.title('Arxiv Paper Summarizer and Interactive Q&A')
paper_id_input = st.text_input('Enter Arxiv Paper ID', '')
if st.button('Summarize Paper') and paper_id_input:
with st.spinner('Fetching and summarizing the paper...'):
try:
doc = arxiv_loader(paper_id=paper_id_input)
st.session_state.documents = chunk_data(docs=doc)
# st.write(st.session_state.documents)
chain = load_summarize_chain(
llm=llm_summary,
chain_type='map_reduce',
verbose=False
)
summary = chain.run(st.session_state.documents)
st.subheader('Summary')
st.write(summary)
except Exception as e:
st.error(f"An error occurred: {e}")
def initialize_index(index_name='arxiv-summarizer'):
# documents = chunk_data(docs=doc)
embeddings = OpenAIEmbeddings(api_key=openai_api_key)
index_name = index_name
# Make sure environment is correctly spelled (there was a typo in your provided code)
pinecone.Pinecone(
api_key=pinecone_api_key,
environment=environment
)
if st.session_state.documents:
index = Pinecone.from_documents(st.session_state.documents, embeddings, index_name=index_name)
else:
index = None
return index
index = initialize_index()
def retrieve_query(query, k=2):
matching_results = index.similarity_search(query, k=k)
return matching_results
def retrieve_answers(query):
chain = load_qa_chain(llm, chain_type='stuff')
doc_search = retrieve_query(query)
print(doc_search)
response = chain.run(input_documents=doc_search, question=query)
return response
if paper_id_input:
user_query = st.text_input("Ask a question about the paper:", '')
if user_query:
if st.button('Get Answer'):
with st.spinner('Retrieving your answer...'):
try:
answer = retrieve_answers(user_query)
st.subheader('Answer')
st.write(answer)
except Exception as e:
st.error(f"An error occurred while retrieving the answer: {e}")
| [
"langchain.vectorstores.Pinecone.from_documents",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.question_answering.load_qa_chain",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.chains.summarize.load_summarize_chain",
"langchain.OpenAI"
] | [((690, 703), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (701, 703), False, 'from dotenv import load_dotenv\n'), ((722, 749), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (731, 749), False, 'import os\n'), ((769, 798), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (778, 798), False, 'import os\n'), ((813, 838), 'os.getenv', 'os.getenv', (['"""PINECONE_ENV"""'], {}), "('PINECONE_ENV')\n", (822, 838), False, 'import os\n'), ((854, 914), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.3)', 'model_name': '"""gpt-3.5-turbo-0125"""'}), "(temperature=0.3, model_name='gpt-3.5-turbo-0125')\n", (864, 914), False, 'from langchain.chat_models import ChatOpenAI\n'), ((925, 1010), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0125"""', 'temperature': '(0.6)', 'api_key': 'openai_api_key'}), "(model_name='gpt-3.5-turbo-0125', temperature=0.6, api_key=openai_api_key\n )\n", (931, 1010), False, 'from langchain import OpenAI\n'), ((1162, 1216), 'streamlit.title', 'st.title', (['"""Arxiv Paper Summarizer and Interactive Q&A"""'], {}), "('Arxiv Paper Summarizer and Interactive Q&A')\n", (1170, 1216), True, 'import streamlit as st\n'), ((1235, 1276), 'streamlit.text_input', 'st.text_input', (['"""Enter Arxiv Paper ID"""', '""""""'], {}), "('Enter Arxiv Paper ID', '')\n", (1248, 1276), True, 'import streamlit as st\n'), ((1281, 1309), 'streamlit.button', 'st.button', (['"""Summarize Paper"""'], {}), "('Summarize Paper')\n", (1290, 1309), True, 'import streamlit as st\n'), ((2076, 2116), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'api_key': 'openai_api_key'}), '(api_key=openai_api_key)\n', (2092, 2116), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2239, 2307), 'pinecone.Pinecone', 'pinecone.Pinecone', ([], {'api_key': 'pinecone_api_key', 'environment': 'environment'}), '(api_key=pinecone_api_key, environment=environment)\n', (2256, 2307), False, 'import pinecone\n'), ((2706, 2744), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (2719, 2744), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2932, 2984), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about the paper:"""', '""""""'], {}), "('Ask a question about the paper:', '')\n", (2945, 2984), True, 'import streamlit as st\n'), ((1339, 1390), 'streamlit.spinner', 'st.spinner', (['"""Fetching and summarizing the paper..."""'], {}), "('Fetching and summarizing the paper...')\n", (1349, 1390), True, 'import streamlit as st\n'), ((2381, 2472), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['st.session_state.documents', 'embeddings'], {'index_name': 'index_name'}), '(st.session_state.documents, embeddings, index_name=\n index_name)\n', (2404, 2472), False, 'from langchain.vectorstores import Pinecone\n'), ((3020, 3043), 'streamlit.button', 'st.button', (['"""Get Answer"""'], {}), "('Get Answer')\n", (3029, 3043), True, 'import streamlit as st\n'), ((1593, 1670), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'llm_summary', 'chain_type': '"""map_reduce"""', 'verbose': '(False)'}), "(llm=llm_summary, chain_type='map_reduce', verbose=False)\n", (1613, 1670), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1821, 1844), 'streamlit.subheader', 
'st.subheader', (['"""Summary"""'], {}), "('Summary')\n", (1833, 1844), True, 'import streamlit as st\n'), ((1857, 1874), 'streamlit.write', 'st.write', (['summary'], {}), '(summary)\n', (1865, 1874), True, 'import streamlit as st\n'), ((1918, 1953), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (1926, 1953), True, 'import streamlit as st\n'), ((3062, 3101), 'streamlit.spinner', 'st.spinner', (['"""Retrieving your answer..."""'], {}), "('Retrieving your answer...')\n", (3072, 3101), True, 'import streamlit as st\n'), ((3202, 3224), 'streamlit.subheader', 'st.subheader', (['"""Answer"""'], {}), "('Answer')\n", (3214, 3224), True, 'import streamlit as st\n'), ((3245, 3261), 'streamlit.write', 'st.write', (['answer'], {}), '(answer)\n', (3253, 3261), True, 'import streamlit as st\n'), ((3321, 3384), 'streamlit.error', 'st.error', (['f"""An error occurred while retrieving the answer: {e}"""'], {}), "(f'An error occurred while retrieving the answer: {e}')\n", (3329, 3384), True, 'import streamlit as st\n')] |
"""Create a ChatVectorDBChain for question/answering."""
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ChatVectorDBChain
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
QA_PROMPT)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.vectorstores import Pinecone
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# system_template = """Use the following pieces of context to answer the users question.
# If you don't know the answer, just say that you don't know, don't try to make up an answer.
# ----------------
# {context}"""
template = """You are a helpful AI assistant that answers questions about
an e-commerce company called "Sindabad.com" in a friendly and polite
manner. You will be given a context that will represent Sindabad.com's
product inventory. Users might ask about products, they might want to
know your suggestions as well. Most importantly, they might ask about
a specific product and its associated product link. If they want to know
about product links, you will provide it accordingly with the help of the
given "Context". Answer the question in your own words as truthfully as
possible from the context given to you. If you do not know the answer to
the question, simply respond with "I don't know. Could you please rephrase
the question?". If questions are asked where there is no relevant information
available in the context, answer the question with your existing knowledge on
that question and "ignore" the "Context" given to you.
----------------
context: {context}"""
messages = [
SystemMessagePromptTemplate.from_template(template),
HumanMessagePromptTemplate.from_template("{question}")
]
prompt = ChatPromptTemplate.from_messages(messages)
def get_chain(
vectorstore: Pinecone,
question_handler,
stream_handler,
tracing: bool = False
) -> ChatVectorDBChain:
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
question_manager.add_handler(tracer)
stream_manager.add_handler(tracer)
question_gen_llm = ChatOpenAI(
temperature=0,
verbose=True,
callback_manager=question_manager,
)
streaming_llm = ChatOpenAI(
streaming=True,
callback_manager=stream_manager,
verbose=True,
temperature=0,
)
question_generator = LLMChain(
llm=question_gen_llm,
prompt=CONDENSE_QUESTION_PROMPT,
callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm,
chain_type="stuff",
prompt=prompt,
callback_manager=manager
)
# qa = ChatVectorDBChain(
# vectorstore=vectorstore,
# combine_docs_chain=doc_chain,
# question_generator=question_generator,
# callback_manager=manager,
# )
qa = ConversationalRetrievalChain(
retriever=vectorstore.as_retriever(),
combine_docs_chain=doc_chain,
question_generator=question_generator,
callback_manager=manager
)
return qa
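# Illustrative call; the vectorstore and handler objects below are hypothetical:
# qa_chain = get_chain(vectorstore, question_handler, stream_handler)
# result = await qa_chain.acall({"question": "Do you sell laptops?", "chat_history": []})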
| [
"langchain.chains.llm.LLMChain",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.callbacks.manager.AsyncCallbackManager",
"langchain.callbacks.tracers.LangChainTracer",
"langchain.chains.question_answering.load_qa_chain",
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.prompts.chat.ChatPromptTemplate.from_messages"
] | [((2109, 2151), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['messages'], {}), '(messages)\n', (2141, 2151), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1986, 2037), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (2027, 2037), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2043, 2097), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (2083, 2097), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2501, 2525), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (2521, 2525), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2549, 2589), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (2569, 2589), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2611, 2649), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (2631, 2649), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2887, 2961), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'verbose': '(True)', 'callback_manager': 'question_manager'}), '(temperature=0, verbose=True, callback_manager=question_manager)\n', (2897, 2961), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3013, 3105), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'temperature': '(0)'}), '(streaming=True, callback_manager=stream_manager, verbose=True,\n temperature=0)\n', (3023, 3105), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3167, 3260), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (3175, 3260), False, 'from langchain.chains.llm import LLMChain\n'), ((3303, 3396), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'prompt', 'callback_manager': 'manager'}), "(streaming_llm, chain_type='stuff', prompt=prompt,\n callback_manager=manager)\n", (3316, 3396), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2683, 2700), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (2698, 2700), False, 'from langchain.callbacks.tracers import LangChainTracer\n')] |
from llama_index.core import VectorStoreIndex,SimpleDirectoryReader,ServiceContext
print("VectorStoreIndex,SimpleDirectoryReader,ServiceContext imported")
from llama_index.llms.huggingface import HuggingFaceLLM
print("HuggingFaceLLM imported")
from llama_index.core.prompts.prompts import SimpleInputPrompt
print("SimpleInputPrompt imported")
from ctransformers import AutoModelForCausalLM
print("AutoModelForCausalLM imported")
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
print("HuggingFaceEmbeddings imported")
from llama_index.core import ServiceContext
print("ServiceContext imported")
from llama_index.embeddings.langchain import LangchainEmbedding
print("LangchainEmbedding imported")
from langchain_community.document_loaders import PyPDFLoader
print("PyPDFLoader imported")
import json
import torch
import os
from dotenv import load_dotenv
load_dotenv()
HuggingFace_Api = os.environ.get('HF_TOKEN')
documents = SimpleDirectoryReader('./testing/docs').load_data()
print("SimpleDirectoryReader imported")
def get_system_prompt():
'''This function is used to load the system prompt from the prompts.json file'''
with open('prompts.json') as f:
data = json.load(f)
return data['Default']
query_wrapper_prompt=SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")
def load_model(context_window: int, max_new_tokens: int):
'''This function is used to load the model from the HuggingFaceLLM'''
print(f"""Available Cuda: {torch.cuda.get_device_name()} \n
Trying to load the model model""")
try:
llm = HuggingFaceLLM(context_window=context_window,
max_new_tokens=max_new_tokens,
generate_kwargs={"temperature": 0.0, "do_sample": False},
system_prompt=get_system_prompt(),
query_wrapper_prompt=query_wrapper_prompt,
tokenizer_name="./meta",
model_name="./meta",
device_map="cuda",
# uncomment this if using CUDA to reduce memory usage
model_kwargs={"torch_dtype": torch.float16,"load_in_8bit":True }
)
print("Model Loaded")
return llm
except Exception as e:
print(f"Error: {e}")
return None
def embed_model():
'''This function is used to load the model from the LangchainEmbedding'''
embed = LangchainEmbedding(
HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2"))
service_context=ServiceContext.from_defaults(
chunk_size=1024,
llm=load_model(context_window=4096, max_new_tokens=256),
embed_model=embed
)
return service_context
def get_index():
'''This function is used to load the index from the VectorStoreIndex'''
index=VectorStoreIndex.from_documents(documents,service_context=embed_model())
return index
def main():
    query_engine = get_index().as_query_engine()
    response = query_engine.query("What is this PDF about?")
out = response
print(response)
if __name__ == "__main__":
main() | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((872, 885), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (883, 885), False, 'from dotenv import load_dotenv\n'), ((905, 931), 'os.environ.get', 'os.environ.get', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (919, 931), False, 'import os\n'), ((1262, 1315), 'llama_index.core.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""<|USER|>{query_str}<|ASSISTANT|>"""'], {}), "('<|USER|>{query_str}<|ASSISTANT|>')\n", (1279, 1315), False, 'from llama_index.core.prompts.prompts import SimpleInputPrompt\n'), ((945, 984), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./testing/docs"""'], {}), "('./testing/docs')\n", (966, 984), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((1200, 1212), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1209, 1212), False, 'import json\n'), ((2523, 2598), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-mpnet-base-v2"""'}), "(model_name='sentence-transformers/all-mpnet-base-v2')\n", (2544, 2598), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1481, 1509), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {}), '()\n', (1507, 1509), False, 'import torch\n')] |
import itertools
from langchain.cache import InMemoryCache, SQLiteCache
import langchain
import pandas as pd
from certa.utils import merge_sources
import ellmer.models
import ellmer.metrics
from time import sleep, time
import traceback
from tqdm import tqdm
cache = "sqlite"
samples = 2
explanation_granularity = "attribute"
# setup langchain cache
if cache == "memory":
langchain.llm_cache = InMemoryCache()
elif cache == "sqlite":
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
llm_configs = [
{"model_type": "falcon", "model_name": "vilsonrodrigues/falcon-7b-instruct-sharded", "deployment_name": "local", "tag": "falcon"},
]
for llm_config in llm_configs:
pase = llm = ellmer.models.GenericEllmer(explanation_granularity=explanation_granularity, verbose=True,
deployment_name=llm_config['deployment_name'], temperature=0.01,
model_name=llm_config['model_name'], model_type=llm_config['model_type'],
prompts={"pase": "ellmer/prompts/lc_pase_llama2.txt"})
ptsew = ellmer.models.GenericEllmer(explanation_granularity=explanation_granularity, verbose=True,
deployment_name=llm_config['deployment_name'], temperature=0.01,
model_name=llm_config['model_name'], model_type=llm_config['model_type'],
prompts={
"ptse": {"er": "ellmer/prompts/er.txt", "why": "ellmer/prompts/er-why.txt",
"saliency": "ellmer/prompts/er-saliency-lc.txt",
"cf": "ellmer/prompts/er-cf-lc.txt"}})
# for each dataset in deepmatcher datasets
dataset_names = ['abt_buy', 'fodo_zaga', 'walmart_amazon']
base_dir = '/Users/tteofili/dev/cheapER/datasets/'
for d in dataset_names:
print(f'using dataset {d}')
dataset_dir = '/'.join([base_dir, d])
lsource = pd.read_csv(dataset_dir + '/tableA.csv')
rsource = pd.read_csv(dataset_dir + '/tableB.csv')
gt = pd.read_csv(dataset_dir + '/train.csv')
valid = pd.read_csv(dataset_dir + '/valid.csv')
test = pd.read_csv(dataset_dir + '/test.csv')
test_df = merge_sources(test, 'ltable_', 'rtable_', lsource, rsource, ['label'], [])
ellmers = {
"ptsew_" + llm_config['tag']: ptsew,
"pase_" + llm_config['tag']: pase,
}
result_files = []
all_llm_results = dict()
for key, llm in ellmers.items():
print(f'{key} on {d}')
curr_llm_results = []
start_time = time()
# generate predictions and explanations
test_data_df = test_df[:samples]
ranged = range(len(test_data_df))
for idx in tqdm(ranged, disable=False):
try:
rand_row = test_df.iloc[[idx]]
ltuple, rtuple = ellmer.utils.get_tuples(rand_row)
print(f'ltuple:\n{ltuple}\nrtuple:\n{rtuple}')
answer_dictionary = llm.predict_and_explain(ltuple, rtuple)
print(answer_dictionary)
prediction = answer_dictionary['prediction']
saliency = answer_dictionary['saliency']
cfs = [answer_dictionary['cf']]
curr_llm_results.append({"id": idx, "ltuple": ltuple, "rtuple": rtuple, "prediction": prediction,
"label": rand_row['label'].values[0], "saliency": saliency, "cfs": cfs})
except Exception:
traceback.print_exc()
print(f'error, waiting...')
sleep(10)
start_time += 10
| [
"langchain.cache.SQLiteCache",
"langchain.cache.InMemoryCache"
] | [((399, 414), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (412, 414), False, 'from langchain.cache import InMemoryCache, SQLiteCache\n'), ((465, 507), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (476, 507), False, 'from langchain.cache import InMemoryCache, SQLiteCache\n'), ((2112, 2152), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/tableA.csv')"], {}), "(dataset_dir + '/tableA.csv')\n", (2123, 2152), True, 'import pandas as pd\n'), ((2171, 2211), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/tableB.csv')"], {}), "(dataset_dir + '/tableB.csv')\n", (2182, 2211), True, 'import pandas as pd\n'), ((2225, 2264), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/train.csv')"], {}), "(dataset_dir + '/train.csv')\n", (2236, 2264), True, 'import pandas as pd\n'), ((2281, 2320), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/valid.csv')"], {}), "(dataset_dir + '/valid.csv')\n", (2292, 2320), True, 'import pandas as pd\n'), ((2336, 2374), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/test.csv')"], {}), "(dataset_dir + '/test.csv')\n", (2347, 2374), True, 'import pandas as pd\n'), ((2393, 2467), 'certa.utils.merge_sources', 'merge_sources', (['test', '"""ltable_"""', '"""rtable_"""', 'lsource', 'rsource', "['label']", '[]'], {}), "(test, 'ltable_', 'rtable_', lsource, rsource, ['label'], [])\n", (2406, 2467), False, 'from certa.utils import merge_sources\n'), ((2790, 2796), 'time.time', 'time', ([], {}), '()\n', (2794, 2796), False, 'from time import sleep, time\n'), ((2964, 2991), 'tqdm.tqdm', 'tqdm', (['ranged'], {'disable': '(False)'}), '(ranged, disable=False)\n', (2968, 2991), False, 'from tqdm import tqdm\n'), ((3796, 3817), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3815, 3817), False, 'import traceback\n'), ((3886, 3895), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (3891, 3895), False, 'from time import sleep, time\n')] |
import streamlit as st
import langchain
# from dotenv import load_dotenv
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
import openai
import os
from PyPDF2 import PdfReader
from docx import Document
import pinecone
import time
from langchain.vectorstores import Pinecone
import toml
def main():
# load_dotenv()
st.set_page_config(page_title="Chat with multiple files", page_icon=":books:")
st.write(css, unsafe_allow_html=True)
openai.api_key = st.secrets["OPENAI_API_KEY"]
pinecone_api_key = st.secrets["PINECONE_API_KEY"]
pinecone_env = st.secrets["PINECONE_ENV"]
index_name = st.secrets["PINECONE_INDEX_NAME"]
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
st.header("Chat with multiple files :books:")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
user_question = st.chat_input("What is up?")
if user_question:
if st.session_state.conversation is None:
st.error("Please provide data and click 'Process' before asking questions.")
else:
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
selected_tab = st.sidebar.radio("Navigation", options=["Files", "Text"], horizontal=True, label_visibility="collapsed")
if selected_tab == "Files":
st.sidebar.subheader("Upload and Process Files")
uploaded_files = st.sidebar.file_uploader("Upload your files here and click on 'Process'", accept_multiple_files=True)
if uploaded_files:
if st.sidebar.button("Process"):
with st.spinner("Processing"):
# initialize pinecone
pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)
if index_name in pinecone.list_indexes():
pinecone.delete_index(index_name)
# we create a new index
pinecone.create_index(name=index_name, metric='cosine',
dimension=1536) # 1536 dim of text-embedding-ada-002
# wait for index to be initialized
while not pinecone.describe_index(index_name).status['ready']:
time.sleep(1)
st.session_state.conversation = None
st.session_state.chat_history = None
text = ""
for file in uploaded_files:
file_extension = os.path.splitext(file.name)[1].lower()
if file_extension == '.pdf':
pdf_reader = PdfReader(file)
for page in pdf_reader.pages:
text += page.extract_text()
elif file_extension == '.txt':
text += file.read().decode("utf-8")
elif file_extension == '.docx':
doc = Document(file)
for paragraph in doc.paragraphs:
text += paragraph.text + "\n"
else:
st.warning('We only support PDF, TXT and DOCX files')
st.stop()
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
text_chunks = text_splitter.split_text(text)
embeddings = OpenAIEmbeddings()
# vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
vectorstore = Pinecone.from_texts(text_chunks, embeddings, index_name=index_name)
llm = ChatOpenAI(model_name = 'gpt-4')
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
st.session_state.conversation = conversation_chain
elif selected_tab == "Text":
st.sidebar.subheader("Enter Text")
user_text = st.sidebar.text_area("Enter your text here", "")
if st.sidebar.button("Process Text"):
if not user_text.strip():
st.warning("Please enter some text before processing.")
else:
# Process the user's entered text
if user_text:
# total_character_count = len(user_text)
# if total_character_count > 400000:
# st.warning("Total input data should not exceed 400,000 characters.")
# st.stop()
st.session_state.conversation = None
st.session_state.chat_history = None
# initialize pinecone
pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)
if index_name in pinecone.list_indexes():
pinecone.delete_index(index_name)
# we create a new index
pinecone.create_index(name=index_name, metric='cosine',
dimension=1536) # 1536 dim of text-embedding-ada-002
# wait for index to be initialized
while not pinecone.describe_index(index_name).status['ready']:
time.sleep(1)
st.session_state.conversation = None
st.session_state.chat_history = None
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len)
text_chunks = text_splitter.split_text(user_text)
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone.from_texts(text_chunks, embeddings, index_name=index_name)
                    llm = ChatOpenAI(model_name='gpt-4')
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=vectorstore.as_retriever(), memory=memory)
st.session_state.conversation = conversation_chain
if __name__ == '__main__':
main()
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.vectorstores.Pinecone.from_texts",
"langchain.chat_models.ChatOpenAI",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.memory.ConversationBufferMemory"
] | [((669, 747), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with multiple files"""', 'page_icon': '""":books:"""'}), "(page_title='Chat with multiple files', page_icon=':books:')\n", (687, 747), True, 'import streamlit as st\n'), ((752, 789), 'streamlit.write', 'st.write', (['css'], {'unsafe_allow_html': '(True)'}), '(css, unsafe_allow_html=True)\n', (760, 789), True, 'import streamlit as st\n'), ((1181, 1226), 'streamlit.header', 'st.header', (['"""Chat with multiple files :books:"""'], {}), "('Chat with multiple files :books:')\n", (1190, 1226), True, 'import streamlit as st\n'), ((1469, 1497), 'streamlit.chat_input', 'st.chat_input', (['"""What is up?"""'], {}), "('What is up?')\n", (1482, 1497), True, 'import streamlit as st\n'), ((2177, 2285), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Navigation"""'], {'options': "['Files', 'Text']", 'horizontal': '(True)', 'label_visibility': '"""collapsed"""'}), "('Navigation', options=['Files', 'Text'], horizontal=True,\n label_visibility='collapsed')\n", (2193, 2285), True, 'import streamlit as st\n'), ((2323, 2371), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Upload and Process Files"""'], {}), "('Upload and Process Files')\n", (2343, 2371), True, 'import streamlit as st\n'), ((2397, 2502), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload your files here and click on \'Process\'"""'], {'accept_multiple_files': '(True)'}), '("Upload your files here and click on \'Process\'",\n accept_multiple_files=True)\n', (2421, 2502), True, 'import streamlit as st\n'), ((1370, 1402), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1385, 1402), True, 'import streamlit as st\n'), ((1416, 1447), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (1427, 1447), True, 'import streamlit as st\n'), ((1582, 1658), 'streamlit.error', 'st.error', (['"""Please provide data and click \'Process\' before asking questions."""'], {}), '("Please provide data and click \'Process\' before asking questions.")\n', (1590, 1658), True, 'import streamlit as st\n'), ((1696, 1754), 'streamlit.session_state.conversation', 'st.session_state.conversation', (["{'question': user_question}"], {}), "({'question': user_question})\n", (1725, 1754), True, 'import streamlit as st\n'), ((2542, 2570), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process"""'], {}), "('Process')\n", (2559, 2570), True, 'import streamlit as st\n'), ((5125, 5159), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Enter Text"""'], {}), "('Enter Text')\n", (5145, 5159), True, 'import streamlit as st\n'), ((5180, 5228), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter your text here"""', '""""""'], {}), "('Enter your text here', '')\n", (5200, 5228), True, 'import streamlit as st\n'), ((5241, 5274), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process Text"""'], {}), "('Process Text')\n", (5258, 5274), True, 'import streamlit as st\n'), ((2593, 2617), 'streamlit.spinner', 'st.spinner', (['"""Processing"""'], {}), "('Processing')\n", (2603, 2617), True, 'import streamlit as st\n'), ((2682, 2747), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key', 'environment': 'pinecone_env'}), '(api_key=pinecone_api_key, environment=pinecone_env)\n', (2695, 2747), False, 'import pinecone\n'), ((2934, 3005), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'index_name', 
'metric': '"""cosine"""', 'dimension': '(1536)'}), "(name=index_name, metric='cosine', dimension=1536)\n", (2955, 3005), False, 'import pinecone\n'), ((4300, 4398), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (4321, 4398), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((4494, 4512), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4510, 4512), False, 'from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings\n'), ((4641, 4708), 'langchain.vectorstores.Pinecone.from_texts', 'Pinecone.from_texts', (['text_chunks', 'embeddings'], {'index_name': 'index_name'}), '(text_chunks, embeddings, index_name=index_name)\n', (4660, 4708), False, 'from langchain.vectorstores import Pinecone\n'), ((4735, 4765), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""'}), "(model_name='gpt-4')\n", (4745, 4765), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4797, 4870), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (4821, 4870), False, 'from langchain.memory import ConversationBufferMemory\n'), ((5330, 5385), 'streamlit.warning', 'st.warning', (['"""Please enter some text before processing."""'], {}), "('Please enter some text before processing.')\n", (5340, 5385), True, 'import streamlit as st\n'), ((1957, 2006), 'htmlTemplates.user_template.replace', 'user_template.replace', (['"""{{MSG}}"""', 'message.content'], {}), "('{{MSG}}', message.content)\n", (1978, 2006), False, 'from htmlTemplates import css, bot_template, user_template\n'), ((2083, 2131), 'htmlTemplates.bot_template.replace', 'bot_template.replace', (['"""{{MSG}}"""', 'message.content'], {}), "('{{MSG}}', message.content)\n", (2103, 2131), False, 'from htmlTemplates import css, bot_template, user_template\n'), ((2786, 2809), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (2807, 2809), False, 'import pinecone\n'), ((2835, 2868), 'pinecone.delete_index', 'pinecone.delete_index', (['index_name'], {}), '(index_name)\n', (2856, 2868), False, 'import pinecone\n'), ((3249, 3262), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3259, 3262), False, 'import time\n'), ((5911, 5976), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'pinecone_api_key', 'environment': 'pinecone_env'}), '(api_key=pinecone_api_key, environment=pinecone_env)\n', (5924, 5976), False, 'import pinecone\n'), ((6163, 6234), 'pinecone.create_index', 'pinecone.create_index', ([], {'name': 'index_name', 'metric': '"""cosine"""', 'dimension': '(1536)'}), "(name=index_name, metric='cosine', dimension=1536)\n", (6184, 6234), False, 'import pinecone\n'), ((6644, 6742), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (6665, 6742), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((6843, 6861), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (6859, 6861), False, 'from langchain.embeddings import OpenAIEmbeddings, 
HuggingFaceInstructEmbeddings\n'), ((6896, 6963), 'langchain.vectorstores.Pinecone.from_texts', 'Pinecone.from_texts', (['text_chunks', 'embeddings'], {'index_name': 'index_name'}), '(text_chunks, embeddings, index_name=index_name)\n', (6915, 6963), False, 'from langchain.vectorstores import Pinecone\n'), ((6991, 7021), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""'}), "(model_name='gpt-4')\n", (7001, 7021), False, 'from langchain.chat_models import ChatOpenAI\n'), ((7053, 7126), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (7077, 7126), False, 'from langchain.memory import ConversationBufferMemory\n'), ((3632, 3647), 'PyPDF2.PdfReader', 'PdfReader', (['file'], {}), '(file)\n', (3641, 3647), False, 'from PyPDF2 import PdfReader\n'), ((6015, 6038), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (6036, 6038), False, 'import pinecone\n'), ((6064, 6097), 'pinecone.delete_index', 'pinecone.delete_index', (['index_name'], {}), '(index_name)\n', (6085, 6097), False, 'import pinecone\n'), ((6478, 6491), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6488, 6491), False, 'import time\n'), ((3172, 3207), 'pinecone.describe_index', 'pinecone.describe_index', (['index_name'], {}), '(index_name)\n', (3195, 3207), False, 'import pinecone\n'), ((3498, 3525), 'os.path.splitext', 'os.path.splitext', (['file.name'], {}), '(file.name)\n', (3514, 3525), False, 'import os\n'), ((3975, 3989), 'docx.Document', 'Document', (['file'], {}), '(file)\n', (3983, 3989), False, 'from docx import Document\n'), ((4171, 4224), 'streamlit.warning', 'st.warning', (['"""We only support PDF, TXT and DOCX files"""'], {}), "('We only support PDF, TXT and DOCX files')\n", (4181, 4224), True, 'import streamlit as st\n'), ((4253, 4262), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (4260, 4262), True, 'import streamlit as st\n'), ((6401, 6436), 'pinecone.describe_index', 'pinecone.describe_index', (['index_name'], {}), '(index_name)\n', (6424, 6436), False, 'import pinecone\n')] |
import langchain
from langchain.chains import LLMChain, SimpleSequentialChain, ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
langchain.verbose = True
chat = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
conversation = ConversationChain(
llm=chat,
memory=ConversationBufferMemory()
)
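# Simple REPL loop: ConversationBufferMemory carries the full chat history between turns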
while True:
user_message = input("You: ")
ai_message = conversation.predict(input=user_message)
print(f"AI: {ai_message}")
| [
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI"
] | [((231, 279), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo', temperature=0)\n", (241, 279), False, 'from langchain.chat_models import ChatOpenAI\n'), ((339, 365), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (363, 365), False, 'from langchain.memory import ConversationBufferMemory\n')] |
import logging
import sys
import langchain
from extract_100knocks_qa import extract_questions
from langchain.chat_models import ChatOpenAI
from llama_index import (GPTSQLStructStoreIndex, LLMPredictor, ServiceContext,
SQLDatabase)
from ruamel.yaml import YAML
from sqlalchemy import create_engine
verbose = True
pgconfig = {
'host': 'localhost',
'port': 5432,
'database': 'dsdojo_db',
'user': 'padawan',
'password': 'padawan12345',
}
def main():
if verbose:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
langchain.verbose = True
    # Connect to the database
database_url = 'postgresql://{user}:{password}@{host}:{port}/{database}'.format(
**pgconfig)
engine = create_engine(database_url)
    # LlamaIndex uses text-davinci-003 by default, so configure it to use gpt-3.5-turbo
llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0)
predictor = LLMPredictor(llm)
service_context = ServiceContext.from_defaults(llm_predictor=predictor)
    # Set up LlamaIndex text-to-SQL
sql_database = SQLDatabase(engine)
index = GPTSQLStructStoreIndex(
[],
service_context=service_context,
sql_database=sql_database,
)
    # Extract the list of questions
questions = extract_questions()
yaml = YAML()
yaml.default_style = '|'
with open('results/result.yaml', 'w', encoding='utf-8') as f:
        # Run text-to-SQL for each question
for question in questions:
try:
response = index.query(question)
answer = response.extra_info['sql_query']
qa = {
'question': question,
'answer': answer,
}
except Exception as e:
qa = {
'question': question,
'error': str(e),
}
yaml.dump([qa], f)
if __name__ == "__main__":
main()
| [
"langchain.chat_models.ChatOpenAI"
] | [((829, 856), 'sqlalchemy.create_engine', 'create_engine', (['database_url'], {}), '(database_url)\n', (842, 856), False, 'from sqlalchemy import create_engine\n'), ((934, 987), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (944, 987), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1004, 1021), 'llama_index.LLMPredictor', 'LLMPredictor', (['llm'], {}), '(llm)\n', (1016, 1021), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1044, 1097), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'predictor'}), '(llm_predictor=predictor)\n', (1072, 1097), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1150, 1169), 'llama_index.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (1161, 1169), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1182, 1273), 'llama_index.GPTSQLStructStoreIndex', 'GPTSQLStructStoreIndex', (['[]'], {'service_context': 'service_context', 'sql_database': 'sql_database'}), '([], service_context=service_context, sql_database=\n sql_database)\n', (1204, 1273), False, 'from llama_index import GPTSQLStructStoreIndex, LLMPredictor, ServiceContext, SQLDatabase\n'), ((1332, 1351), 'extract_100knocks_qa.extract_questions', 'extract_questions', ([], {}), '()\n', (1349, 1351), False, 'from extract_100knocks_qa import extract_questions\n'), ((1364, 1370), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (1368, 1370), False, 'from ruamel.yaml import YAML\n'), ((520, 579), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (539, 579), False, 'import logging\n'), ((619, 659), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (640, 659), False, 'import logging\n'), ((588, 607), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (605, 607), False, 'import logging\n')] |
import os
import openai
from dotenv import load_dotenv
import logging
import re
import hashlib
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.vectorstores.base import VectorStore
from langchain.chains import ChatVectorDBChain
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.llm import LLMChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.prompts import PromptTemplate
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import TokenTextSplitter, TextSplitter
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import TextLoader
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.agents import initialize_agent, Tool, AgentType
from utilities.formrecognizer import AzureFormRecognizerClient
from utilities.azureblobstorage import AzureBlobStorageClient
from utilities.translator import AzureTranslatorClient
from utilities.customprompt import PROMPT
from utilities.redis import RedisExtended
from utilities.azuresearch import AzureSearch
from utilities.NewAzureOpenAI import NewAzureOpenAI
import langchain
import pandas as pd
import urllib
from fake_useragent import UserAgent
from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig
langchain.verbose = True
class LLMHelper:
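    """Wires together document loading, chunking, embeddings, vector storage and retrieval QA for Azure OpenAI."""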
def __init__(self,
document_loaders : BaseLoader = None,
text_splitter: TextSplitter = None,
embeddings: OpenAIEmbeddings = None,
llm: AzureOpenAI = None,
temperature: float = None,
max_tokens: int = None,
custom_prompt: str = "",
vector_store: VectorStore = None,
k: int = None,
pdf_parser: AzureFormRecognizerClient = None,
blob_client: AzureBlobStorageClient = None,
enable_translation: bool = False,
translator: AzureTranslatorClient = None):
load_dotenv()
openai.api_type = "azure"
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Azure OpenAI settings
self.api_base = openai.api_base
self.api_version = openai.api_version
self.index_name: str = "embeddings"
self.model: str = os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', "text-embedding-ada-002")
self.deployment_name: str = os.getenv("OPENAI_ENGINE", os.getenv("OPENAI_ENGINES", "text-davinci-003"))
self.deployment_type: str = os.getenv("OPENAI_DEPLOYMENT_TYPE", "Text")
self.temperature: float = float(os.getenv("OPENAI_TEMPERATURE", 0.7)) if temperature is None else temperature
self.max_tokens: int = int(os.getenv("OPENAI_MAX_TOKENS", -1)) if max_tokens is None else max_tokens
self.prompt = PROMPT if custom_prompt == '' else PromptTemplate(template=custom_prompt, input_variables=["summaries", "question"])
self.vector_store_type = os.getenv("VECTOR_STORE_TYPE")
# Azure Search settings
if self.vector_store_type == "AzureSearch":
self.vector_store_address: str = os.getenv('AZURE_SEARCH_SERVICE_NAME')
self.vector_store_password: str = os.getenv('AZURE_SEARCH_ADMIN_KEY')
else:
# Vector store settings
self.vector_store_address: str = os.getenv('REDIS_ADDRESS', "localhost")
self.vector_store_port: int= int(os.getenv('REDIS_PORT', 6379))
self.vector_store_protocol: str = os.getenv("REDIS_PROTOCOL", "redis://")
self.vector_store_password: str = os.getenv("REDIS_PASSWORD", None)
if self.vector_store_password:
self.vector_store_full_address = f"{self.vector_store_protocol}:{self.vector_store_password}@{self.vector_store_address}:{self.vector_store_port}"
else:
self.vector_store_full_address = f"{self.vector_store_protocol}{self.vector_store_address}:{self.vector_store_port}"
self.chunk_size = int(os.getenv('CHUNK_SIZE', 500))
self.chunk_overlap = int(os.getenv('CHUNK_OVERLAP', 100))
self.document_loaders: BaseLoader = WebBaseLoader if document_loaders is None else document_loaders
self.text_splitter: TextSplitter = TokenTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap) if text_splitter is None else text_splitter
self.embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=self.model, chunk_size=1) if embeddings is None else embeddings
if self.deployment_type == "Chat":
self.llm: ChatOpenAI = ChatOpenAI(model_name=self.deployment_name, engine=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens if self.max_tokens != -1 else None) if llm is None else llm
else:
self.llm: AzureOpenAI = NewAzureOpenAI(deployment_name=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens) if llm is None else llm
if self.vector_store_type == "AzureSearch":
self.vector_store: VectorStore = AzureSearch(azure_cognitive_search_name=self.vector_store_address, azure_cognitive_search_key=self.vector_store_password, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
else:
self.vector_store: RedisExtended = RedisExtended(redis_url=self.vector_store_full_address, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
self.k : int = 3 if k is None else k
self.pdf_parser : AzureFormRecognizerClient = AzureFormRecognizerClient() if pdf_parser is None else pdf_parser
self.blob_client: AzureBlobStorageClient = AzureBlobStorageClient() if blob_client is None else blob_client
self.enable_translation : bool = False if enable_translation is None else enable_translation
self.translator : AzureTranslatorClient = AzureTranslatorClient() if translator is None else translator
        self.user_agent: UserAgent = UserAgent()
self.user_agent.random
self.current_contextDict = {}
self.current_sources = None
self.current_answer = None
langchain.verbose = True
def add_embeddings_lc(self, source_url):
retrycount = 0
while True:
try:
documents = self.document_loaders(source_url).load()
# Convert to UTF-8 encoding for non-ascii text
                for document in documents:
try:
if document.page_content.encode("iso-8859-1") == document.page_content.encode("latin-1"):
document.page_content = document.page_content.encode("iso-8859-1").decode("utf-8", errors="ignore")
except:
pass
docs = self.text_splitter.split_documents(documents)
                # Remove any half non-ascii character left at the start/end of the doc content (langchain TokenTextSplitter may split a non-ascii character in half)
pattern = re.compile(r'[\x00-\x09\x0b\x0c\x0e-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]') # do not remove \x0a (\n) nor \x0d (\r)
                for doc in docs:
doc.page_content = re.sub(pattern, '', doc.page_content)
if doc.page_content == '':
docs.remove(doc)
keys = []
for i, doc in enumerate(docs):
# Create a unique key for the document
source_url = source_url.split('?')[0]
filename = "/".join(source_url.split('/')[4:])
hash_key = hashlib.sha1(f"{source_url}_{i}".encode('utf-8')).hexdigest()
hash_key = f"doc:{self.index_name}:{hash_key}"
keys.append(hash_key)
doc.metadata = {"source": f"[{source_url}]({source_url}_SAS_TOKEN_PLACEHOLDER_)" , "chunk": i, "key": hash_key, "filename": filename}
if self.vector_store_type == 'AzureSearch':
self.vector_store.add_documents(documents=docs, keys=keys)
else:
self.vector_store.add_documents(documents=docs, redis_url=self.vector_store_full_address, index_name=self.index_name, keys=keys)
return
except Exception as e:
logging.error(f"Error adding embeddings for {source_url}: {e}")
if retrycount > 3:
raise e
else:
print(f"Retrying adding embeddings for {source_url}")
retrycount += 1
def convert_file_and_add_embeddings(self, source_url, filename, enable_translation=False):
# Extract the text from the file
text = self.pdf_parser.analyze_read(source_url)
# Translate if requested
converted_text = list(map(lambda x: self.translator.translate(x), text)) if self.enable_translation else text
        # Remove any half non-ascii character left at the start/end of the doc content (langchain TokenTextSplitter may split a non-ascii character in half)
pattern = re.compile(r'[\x00-\x09\x0b\x0c\x0e-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]') # do not remove \x0a (\n) nor \x0d (\r)
converted_text = re.sub(pattern, '', "\n".join(converted_text))
# Upload the text to Azure Blob Storage
converted_filename = f"converted/{filename}.txt"
source_url = self.blob_client.upload_file(converted_text, f"converted/{filename}.txt", content_type='text/plain; charset=utf-8')
print(f"Converted file uploaded to {source_url} with filename {filename}")
# Update the metadata to indicate that the file has been converted
self.blob_client.upsert_blob_metadata(filename, {"converted": "true"})
self.add_embeddings_lc(source_url=source_url)
return converted_filename
def get_all_documents(self, k: int = None):
result = self.vector_store.similarity_search(query="*", k= k if k else self.k)
dataFrame = pd.DataFrame(list(map(lambda x: {
'key': x.metadata['key'],
'filename': x.metadata['filename'],
'source': urllib.parse.unquote(x.metadata['source']),
'content': x.page_content,
'metadata' : x.metadata,
}, result)))
if dataFrame.empty is False:
dataFrame = dataFrame.sort_values(by='filename')
return dataFrame
def get_semantic_answer_lang_chain(self, question, chat_history):
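        # Condense the follow-up question, retrieve matching chunks from the vector store, then answer with cited sources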
question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=True)
doc_chain = load_qa_with_sources_chain(self.llm, chain_type="stuff", verbose=True, prompt=self.prompt)
chain = ConversationalRetrievalChain(
retriever=self.vector_store.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True,
verbose=True,
# top_k_docs_for_context= self.k
)
result = chain({"question": question, "chat_history": chat_history})
sources = "\n".join(set(map(lambda x: x.metadata["source"], result['source_documents'])))
container_sas = self.blob_client.get_container_sas()
contextDict ={}
for res in result['source_documents']:
source_key = self.filter_sourcesLinks(res.metadata['source'].replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)).replace('\n', '').replace(' ', '')
if source_key not in contextDict:
contextDict[source_key] = []
myPageContent = self.clean_encoding(res.page_content)
contextDict[source_key].append(myPageContent)
result['answer'] = result['answer'].split('SOURCES:')[0].split('Sources:')[0].split('SOURCE:')[0].split('Source:')[0]
result['answer'] = self.clean_encoding(result['answer'])
sources = sources.replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)
sources = self.filter_sourcesLinks(sources)
self.current_contextDict = contextDict
self.current_sources = sources
self.current_answer = result['answer']
return result['answer'], contextDict, sources
def get_general_operation_lang_chain(self, question, chat_history):
lfTool = LifeKnowledgeSearchTool(config=LifeKnowledgeSearchConfig(chat_history=chat_history, get_semantic_answer_lang_chain_func=self.get_semantic_answer_lang_chain))
tools = [
MansionPriceTool(),
lfTool,
IotDeviceControlTool(),
            # Tool(
            #     name="Life knowledge search",
            #     func=semantic_run,
            #     description="When the user's question calls for everyday-life knowledge, it can be answered from the life-knowledge base."
            # ),
            # Tool(
            #     name="Property price lookup",
            #     func=semantic_run,
            #     description="This feature can be used when the user is moving house or looking for a new home."
            # ),
            # Tool(
            #     name="IoT device control",
            #     func=iot_device_run,
            #     description="Can be used when the user wants to control devices in their home."
            # ),
]
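        # Let an OpenAI-functions agent decide which of the registered tools should handle the request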
agent = initialize_agent(
tools,
llm=self.llm,
agent=AgentType.OPENAI_FUNCTIONS,
verbose=True,
return_intermediate_steps=True)
result = agent({"input":question})
return question, result['output'], self.current_contextDict, self.current_sources
def get_embeddings_model(self):
OPENAI_EMBEDDINGS_ENGINE_DOC = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002'))
OPENAI_EMBEDDINGS_ENGINE_QUERY = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002'))
return {
"doc": OPENAI_EMBEDDINGS_ENGINE_DOC,
"query": OPENAI_EMBEDDINGS_ENGINE_QUERY
}
def get_completion(self, prompt, **kwargs):
if self.deployment_type == 'Chat':
return self.llm([HumanMessage(content=prompt)]).content
else:
return self.llm(prompt)
# remove paths from sources to only keep the filename
def filter_sourcesLinks(self, sources):
        # use regex to replace all occurrences of '[anypath/anypath/somefilename.xxx](the_link)' with '[somefilename](the_link)' in sources
pattern = r'\[[^\]]*?/([^/\]]*?)\]'
match = re.search(pattern, sources)
while match:
withoutExtensions = match.group(1).split('.')[0] # remove any extension to the name of the source document
sources = sources[:match.start()] + f'[{withoutExtensions}]' + sources[match.end():]
match = re.search(pattern, sources)
sources = ' \n ' + sources.replace('\n', ' \n ') # add a carriage return after each source
return sources
def extract_followupquestions(self, answer):
followupTag = answer.find('Follow-up Questions')
followupQuestions = answer.find('<<')
        # take min of followupTag and followupQuestions if not -1 to avoid taking the followup questions if there is no followupTag
followupTag = min(followupTag, followupQuestions) if followupTag != -1 and followupQuestions != -1 else max(followupTag, followupQuestions)
answer_without_followupquestions = answer[:followupTag] if followupTag != -1 else answer
followup_questions = answer[followupTag:].strip() if followupTag != -1 else ''
# Extract the followup questions as a list
pattern = r'\<\<(.*?)\>\>'
match = re.search(pattern, followup_questions)
followup_questions_list = []
while match:
followup_questions_list.append(followup_questions[match.start()+2:match.end()-2])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
if followup_questions_list != '':
# Extract follow up question
pattern = r'\d. (.*)'
match = re.search(pattern, followup_questions)
while match:
followup_questions_list.append(followup_questions[match.start()+3:match.end()])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
if followup_questions_list != '':
pattern = r'Follow-up Question: (.*)'
match = re.search(pattern, followup_questions)
while match:
followup_questions_list.append(followup_questions[match.start()+19:match.end()])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
# Special case when 'Follow-up questions:' appears in the answer after the <<
followupTag = answer_without_followupquestions.lower().find('follow-up questions')
if followupTag != -1:
answer_without_followupquestions = answer_without_followupquestions[:followupTag]
followupTag = answer_without_followupquestions.lower().find('follow up questions') # LLM can make variations...
if followupTag != -1:
answer_without_followupquestions = answer_without_followupquestions[:followupTag]
return answer_without_followupquestions, followup_questions_list
    # insert citations in the answer - find filenames in the answer matching sources from the filenameList and replace them with '${(id+1)}'
def insert_citations_in_answer(self, answer, filenameList):
        filenameList_lowered = [x.lower() for x in filenameList] # LLM can make case mistakes in returning the filename of the source
matched_sources = []
pattern = r'\[\[(.*?)\]\]'
match = re.search(pattern, answer)
while match:
filename = match.group(1).split('.')[0] # remove any extension to the name of the source document
if filename in filenameList:
if filename not in matched_sources:
matched_sources.append(filename.lower())
filenameIndex = filenameList.index(filename) + 1
answer = answer[:match.start()] + '$^{' + f'{filenameIndex}' + '}$' + answer[match.end():]
else:
answer = answer[:match.start()] + '$^{' + f'{filename.lower()}' + '}$' + answer[match.end():]
match = re.search(pattern, answer)
# When page is reloaded search for references already added to the answer (e.g. '${(id+1)}')
for id, filename in enumerate(filenameList_lowered):
reference = '$^{' + f'{id+1}' + '}$'
if reference in answer and not filename in matched_sources:
matched_sources.append(filename)
return answer, matched_sources, filenameList_lowered
def get_links_filenames(self, answer, sources):
if sources != None:
            split_sources = sources.split(' \n ') # sources are expected to be in the format ' \n [filename1.ext](sourcelink1) \n [filename2.ext](sourcelink2) \n [filename3.ext](sourcelink3) \n '
else:
split_sources = []
srcList = []
linkList = []
filenameList = []
for src in split_sources:
if src != '':
srcList.append(src)
link = src[1:].split('(')[1][:-1].split(')')[0] # get the link
linkList.append(link)
filename = src[1:].split(']')[0] # retrieve the source filename.
source_url = link.split('?')[0]
answer = answer.replace(source_url, filename) # if LLM added a path to the filename, remove it from the answer
filenameList.append(filename)
answer, matchedSourcesList, filenameList = self.insert_citations_in_answer(answer, filenameList) # Add (1), (2), (3) to the answer to indicate the source of the answer
return answer, srcList, matchedSourcesList, linkList, filenameList
def clean_encoding(self, text):
try:
encoding = 'ISO-8859-1'
encodedtext = text.encode(encoding)
encodedtext = encodedtext.decode('utf-8')
except Exception as e:
encodedtext = text
return encodedtext
| [
"langchain.chains.llm.LLMChain",
"langchain.chains.qa_with_sources.load_qa_with_sources_chain",
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.PromptTemplate",
"langchain.schema.HumanMessage",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.text_splitter.TokenTextSplitter"
] | [((2224, 2237), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2235, 2237), False, 'from dotenv import load_dotenv\n'), ((2298, 2326), 'os.getenv', 'os.getenv', (['"""OPENAI_API_BASE"""'], {}), "('OPENAI_API_BASE')\n", (2307, 2326), False, 'import os\n'), ((2402, 2429), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2411, 2429), False, 'import os\n'), ((2619, 2686), 'os.getenv', 'os.getenv', (['"""OPENAI_EMBEDDINGS_ENGINE_DOC"""', '"""text-embedding-ada-002"""'], {}), "('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002')\n", (2628, 2686), False, 'import os\n'), ((2835, 2878), 'os.getenv', 'os.getenv', (['"""OPENAI_DEPLOYMENT_TYPE"""', '"""Text"""'], {}), "('OPENAI_DEPLOYMENT_TYPE', 'Text')\n", (2844, 2878), False, 'import os\n'), ((3278, 3308), 'os.getenv', 'os.getenv', (['"""VECTOR_STORE_TYPE"""'], {}), "('VECTOR_STORE_TYPE')\n", (3287, 3308), False, 'import os\n'), ((6379, 6390), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (6388, 6390), False, 'from fake_useragent import UserAgent\n'), ((6393, 6404), 'fake_useragent.UserAgent', 'UserAgent', ([], {}), '()\n', (6402, 6404), False, 'from fake_useragent import UserAgent\n'), ((9603, 9711), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]"""'], {}), "(\n '[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]'\n )\n", (9613, 9711), False, 'import re\n'), ((11066, 11135), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'verbose': '(True)'}), '(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=True)\n', (11074, 11135), False, 'from langchain.chains.llm import LLMChain\n'), ((11156, 11250), 'langchain.chains.qa_with_sources.load_qa_with_sources_chain', 'load_qa_with_sources_chain', (['self.llm'], {'chain_type': '"""stuff"""', 'verbose': '(True)', 'prompt': 'self.prompt'}), "(self.llm, chain_type='stuff', verbose=True,\n prompt=self.prompt)\n", (11182, 11250), False, 'from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n'), ((13693, 13814), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools'], {'llm': 'self.llm', 'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'return_intermediate_steps': '(True)'}), '(tools, llm=self.llm, agent=AgentType.OPENAI_FUNCTIONS,\n verbose=True, return_intermediate_steps=True)\n', (13709, 13814), False, 'from langchain.agents import initialize_agent, Tool, AgentType\n'), ((14982, 15009), 're.search', 're.search', (['pattern', 'sources'], {}), '(pattern, sources)\n', (14991, 15009), False, 'import re\n'), ((16149, 16187), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (16158, 16187), False, 'import re\n'), ((18363, 18389), 're.search', 're.search', (['pattern', 'answer'], {}), '(pattern, answer)\n', (18372, 18389), False, 'import re\n'), ((2750, 2797), 'os.getenv', 'os.getenv', (['"""OPENAI_ENGINES"""', '"""text-davinci-003"""'], {}), "('OPENAI_ENGINES', 'text-davinci-003')\n", (2759, 2797), False, 'import os\n'), ((3163, 3248), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'custom_prompt', 'input_variables': "['summaries', 'question']"}), "(template=custom_prompt, input_variables=['summaries',\n 'question'])\n", (3177, 3248), False, 'from langchain.prompts import PromptTemplate\n'), ((3440, 3478), 'os.getenv', 
'os.getenv', (['"""AZURE_SEARCH_SERVICE_NAME"""'], {}), "('AZURE_SEARCH_SERVICE_NAME')\n", (3449, 3478), False, 'import os\n'), ((3525, 3560), 'os.getenv', 'os.getenv', (['"""AZURE_SEARCH_ADMIN_KEY"""'], {}), "('AZURE_SEARCH_ADMIN_KEY')\n", (3534, 3560), False, 'import os\n'), ((3657, 3696), 'os.getenv', 'os.getenv', (['"""REDIS_ADDRESS"""', '"""localhost"""'], {}), "('REDIS_ADDRESS', 'localhost')\n", (3666, 3696), False, 'import os\n'), ((3819, 3858), 'os.getenv', 'os.getenv', (['"""REDIS_PROTOCOL"""', '"""redis://"""'], {}), "('REDIS_PROTOCOL', 'redis://')\n", (3828, 3858), False, 'import os\n'), ((3905, 3938), 'os.getenv', 'os.getenv', (['"""REDIS_PASSWORD"""', 'None'], {}), "('REDIS_PASSWORD', None)\n", (3914, 3938), False, 'import os\n'), ((4328, 4356), 'os.getenv', 'os.getenv', (['"""CHUNK_SIZE"""', '(500)'], {}), "('CHUNK_SIZE', 500)\n", (4337, 4356), False, 'import os\n'), ((4391, 4422), 'os.getenv', 'os.getenv', (['"""CHUNK_OVERLAP"""', '(100)'], {}), "('CHUNK_OVERLAP', 100)\n", (4400, 4422), False, 'import os\n'), ((4575, 4654), 'langchain.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'self.chunk_size', 'chunk_overlap': 'self.chunk_overlap'}), '(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap)\n', (4592, 4654), False, 'from langchain.text_splitter import TokenTextSplitter, TextSplitter\n'), ((4743, 4791), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'self.model', 'chunk_size': '(1)'}), '(model=self.model, chunk_size=1)\n', (4759, 4791), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5958, 5985), 'utilities.formrecognizer.AzureFormRecognizerClient', 'AzureFormRecognizerClient', ([], {}), '()\n', (5983, 5985), False, 'from utilities.formrecognizer import AzureFormRecognizerClient\n'), ((6075, 6099), 'utilities.azureblobstorage.AzureBlobStorageClient', 'AzureBlobStorageClient', ([], {}), '()\n', (6097, 6099), False, 'from utilities.azureblobstorage import AzureBlobStorageClient\n'), ((6291, 6314), 'utilities.translator.AzureTranslatorClient', 'AzureTranslatorClient', ([], {}), '()\n', (6312, 6314), False, 'from utilities.translator import AzureTranslatorClient\n'), ((13045, 13063), 'utilities.tools.MansionPriceTool', 'MansionPriceTool', ([], {}), '()\n', (13061, 13063), False, 'from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig\n'), ((13097, 13119), 'utilities.tools.IotDeviceControlTool', 'IotDeviceControlTool', ([], {}), '()\n', (13117, 13119), False, 'from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig\n'), ((14123, 14190), 'os.getenv', 'os.getenv', (['"""OPENAI_EMBEDDINGS_ENGINE_DOC"""', '"""text-embedding-ada-002"""'], {}), "('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002')\n", (14132, 14190), False, 'import os\n'), ((14273, 14342), 'os.getenv', 'os.getenv', (['"""OPENAI_EMBEDDINGS_ENGINE_QUERY"""', '"""text-embedding-ada-002"""'], {}), "('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002')\n", (14282, 14342), False, 'import os\n'), ((15267, 15294), 're.search', 're.search', (['pattern', 'sources'], {}), '(pattern, sources)\n', (15276, 15294), False, 'import re\n'), ((16426, 16464), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (16435, 16464), False, 'import re\n'), ((16612, 16650), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, 
followup_questions)\n', (16621, 16650), False, 'import re\n'), ((17018, 17056), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (17027, 17056), False, 'import re\n'), ((18995, 19021), 're.search', 're.search', (['pattern', 'answer'], {}), '(pattern, answer)\n', (19004, 19021), False, 'import re\n'), ((2919, 2955), 'os.getenv', 'os.getenv', (['"""OPENAI_TEMPERATURE"""', '(0.7)'], {}), "('OPENAI_TEMPERATURE', 0.7)\n", (2928, 2955), False, 'import os\n'), ((3032, 3066), 'os.getenv', 'os.getenv', (['"""OPENAI_MAX_TOKENS"""', '(-1)'], {}), "('OPENAI_MAX_TOKENS', -1)\n", (3041, 3066), False, 'import os\n'), ((3742, 3771), 'os.getenv', 'os.getenv', (['"""REDIS_PORT"""', '(6379)'], {}), "('REDIS_PORT', 6379)\n", (3751, 3771), False, 'import os\n'), ((4908, 5082), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'self.deployment_name', 'engine': 'self.deployment_name', 'temperature': 'self.temperature', 'max_tokens': '(self.max_tokens if self.max_tokens != -1 else None)'}), '(model_name=self.deployment_name, engine=self.deployment_name,\n temperature=self.temperature, max_tokens=self.max_tokens if self.\n max_tokens != -1 else None)\n', (4918, 5082), False, 'from langchain.chat_models import ChatOpenAI\n'), ((5148, 5263), 'utilities.NewAzureOpenAI.NewAzureOpenAI', 'NewAzureOpenAI', ([], {'deployment_name': 'self.deployment_name', 'temperature': 'self.temperature', 'max_tokens': 'self.max_tokens'}), '(deployment_name=self.deployment_name, temperature=self.\n temperature, max_tokens=self.max_tokens)\n', (5162, 5263), False, 'from utilities.NewAzureOpenAI import NewAzureOpenAI\n'), ((5380, 5586), 'utilities.azuresearch.AzureSearch', 'AzureSearch', ([], {'azure_cognitive_search_name': 'self.vector_store_address', 'azure_cognitive_search_key': 'self.vector_store_password', 'index_name': 'self.index_name', 'embedding_function': 'self.embeddings.embed_query'}), '(azure_cognitive_search_name=self.vector_store_address,\n azure_cognitive_search_key=self.vector_store_password, index_name=self.\n index_name, embedding_function=self.embeddings.embed_query)\n', (5391, 5586), False, 'from utilities.azuresearch import AzureSearch\n'), ((5681, 5817), 'utilities.redis.RedisExtended', 'RedisExtended', ([], {'redis_url': 'self.vector_store_full_address', 'index_name': 'self.index_name', 'embedding_function': 'self.embeddings.embed_query'}), '(redis_url=self.vector_store_full_address, index_name=self.\n index_name, embedding_function=self.embeddings.embed_query)\n', (5694, 5817), False, 'from utilities.redis import RedisExtended\n'), ((7486, 7594), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]"""'], {}), "(\n '[\\\\x00-\\\\x09\\\\x0b\\\\x0c\\\\x0e-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]'\n )\n", (7496, 7594), False, 'import re\n'), ((12888, 13017), 'utilities.tools.LifeKnowledgeSearchConfig', 'LifeKnowledgeSearchConfig', ([], {'chat_history': 'chat_history', 'get_semantic_answer_lang_chain_func': 'self.get_semantic_answer_lang_chain'}), '(chat_history=chat_history,\n get_semantic_answer_lang_chain_func=self.get_semantic_answer_lang_chain)\n', (12913, 13017), False, 'from utilities.tools import LifeKnowledgeSearchTool, IotDeviceControlTool, MansionPriceTool, LifeKnowledgeSearchConfig\n'), ((16866, 16904), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (16875, 
16904), False, 'import re\n'), ((17273, 17311), 're.search', 're.search', (['pattern', 'followup_questions'], {}), '(pattern, followup_questions)\n', (17282, 17311), False, 'import re\n'), ((7687, 7724), 're.sub', 're.sub', (['pattern', '""""""', 'doc.page_content'], {}), "(pattern, '', doc.page_content)\n", (7693, 7724), False, 'import re\n'), ((8829, 8892), 'logging.error', 'logging.error', (['f"""Error adding embeddings for {source_url}: {e}"""'], {}), "(f'Error adding embeddings for {source_url}: {e}')\n", (8842, 8892), False, 'import logging\n'), ((14593, 14621), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (14605, 14621), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((10684, 10726), 'urllib.parse.unquote', 'urllib.parse.unquote', (["x.metadata['source']"], {}), "(x.metadata['source'])\n", (10704, 10726), False, 'import urllib\n')] |
from langchain.vectorstores import Milvus
from langchain.chains.retrieval_qa.base import RetrievalQA
from typing import Any
from langchain.memory import ConversationBufferMemory
from langchain import PromptTemplate, FAISS
from langchain.schema import Document
from langchain.embeddings import DashScopeEmbeddings
from llm.dashscope_llm import Dashscope
from embedding.xinghuo_embedding import XhEmbeddings
from llm.spark_llm import Spark
import config
import langchain
from langchain.cache import RedisCache
from redis import Redis
# Redis cache
langchain.llm_cache = RedisCache(Redis(host=config.llm_cache_redis_host, port=config.llm_cache_redis_port, db=config.llm_cache_redis_db))
#embeddings = OpenAIEmbeddings(openai_api_key=config.OPENAI_API_KEY)
#llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model_name="gpt-3.5-turbo-16k")
#embeddings =XhEmbeddings(appid=config.embedding_xh_appid,
# api_key=config.embedding_xh_api_key,
# api_secret=config.embedding_xh_api_secret,
# embedding_url=config.embedding_xh_embedding_url
# )
embeddings = DashScopeEmbeddings(model="text-embedding-v1", dashscope_api_key=config.llm_tyqw_api_key)
#llm = Dashscope()
llm = Spark(version=3)
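# Builds a RetrievalQA chain over a Milvus collection, injecting per-question conversation history into the prompt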
def get_vector_chain(collection_name) -> Any:
llm
template = """
Use the following context (delimited by <ctx></ctx>) and the chat history (delimited by <hs></hs>) to answer the question,The answer cannot exceed 200,If you don't know the answer, just say that you don't know, don't try to make up an answer.
------
<ctx>
{context}
</ctx>
------
<hs>
{history}
</hs>
------
Question: {question}
"""
#Answer in the language in which the question was asked:
prompt = PromptTemplate(
input_variables=["history", "context", "question"],
template=template,
)
vector_db = Milvus(
embedding_function=embeddings,
connection_args={"host": config.Milvus_host, "port": config.Milvus_port, "user": config.Milvus_user, "password":config.Milvus_password},
collection_name=collection_name,
)
chain = RetrievalQA.from_chain_type(
llm,
retriever=vector_db.as_retriever(search_type="similarity", search_kwargs={"k": 3}),
chain_type="stuff",
chain_type_kwargs={
"prompt": prompt,
"memory": ConversationBufferMemory(
memory_key="history",
input_key="question"),
},
)
return chain
def answer_bydoc(collection_name, question):
chain = get_vector_chain(collection_name)
return chain.run(question)
def answer_bybase(question):
result = llm(question)
return result
def question_derive(question):
prompt = "<question>"+question+"</question>,Please generate 5 different short questions for <question>"
llm = Dashscope()
result = llm(prompt)
return result
def query_doc(collection_name, question):
vector_db = Milvus(
embedding_function=embeddings,
connection_args={"host": config.Milvus_host, "port": config.Milvus_port, "user": config.Milvus_user, "password":config.Milvus_password},
collection_name=collection_name,
)
retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={"k": 5})
docs = retriever.get_relevant_documents(question)
return docs
def add_doc(collection_name,question,content):
source = question
base_add_doc(collection_name,source,content)
def base_add_doc(collection_name,source, content):
vector_db = Milvus(
embedding_function=embeddings,
connection_args={"host": config.Milvus_host, "port": config.Milvus_port, "user": config.Milvus_user,
"password": config.Milvus_password},
collection_name=collection_name,
)
doc = Document(page_content=content,
metadata={"source": source})
docs=[]
docs.append(doc)
vector_db.add_documents(docs)
#replay = answer_bydoc("my_doc1", "Are you open on Saturdays?")
#replay = answer_bydoc("my_doc1", "Can I come on Saturday?")
#print(replay)
#replay = answer_bydoc("my_doc1", "Hello")
#print(replay) | [
"langchain.PromptTemplate",
"langchain.embeddings.DashScopeEmbeddings",
"langchain.schema.Document",
"langchain.memory.ConversationBufferMemory",
"langchain.vectorstores.Milvus"
] | [((1149, 1243), 'langchain.embeddings.DashScopeEmbeddings', 'DashScopeEmbeddings', ([], {'model': '"""text-embedding-v1"""', 'dashscope_api_key': 'config.llm_tyqw_api_key'}), "(model='text-embedding-v1', dashscope_api_key=config.\n llm_tyqw_api_key)\n", (1168, 1243), False, 'from langchain.embeddings import DashScopeEmbeddings\n'), ((1264, 1280), 'llm.spark_llm.Spark', 'Spark', ([], {'version': '(3)'}), '(version=3)\n', (1269, 1280), False, 'from llm.spark_llm import Spark\n'), ((579, 686), 'redis.Redis', 'Redis', ([], {'host': 'config.llm_cache_redis_host', 'port': 'config.llm_cache_redis_port', 'db': 'config.llm_cache_redis_db'}), '(host=config.llm_cache_redis_host, port=config.llm_cache_redis_port,\n db=config.llm_cache_redis_db)\n', (584, 686), False, 'from redis import Redis\n'), ((1812, 1902), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'context', 'question']", 'template': 'template'}), "(input_variables=['history', 'context', 'question'], template\n =template)\n", (1826, 1902), False, 'from langchain import PromptTemplate, FAISS\n'), ((1937, 2154), 'langchain.vectorstores.Milvus', 'Milvus', ([], {'embedding_function': 'embeddings', 'connection_args': "{'host': config.Milvus_host, 'port': config.Milvus_port, 'user': config.\n Milvus_user, 'password': config.Milvus_password}", 'collection_name': 'collection_name'}), "(embedding_function=embeddings, connection_args={'host': config.\n Milvus_host, 'port': config.Milvus_port, 'user': config.Milvus_user,\n 'password': config.Milvus_password}, collection_name=collection_name)\n", (1943, 2154), False, 'from langchain.vectorstores import Milvus\n'), ((2915, 2926), 'llm.dashscope_llm.Dashscope', 'Dashscope', ([], {}), '()\n', (2924, 2926), False, 'from llm.dashscope_llm import Dashscope\n'), ((3028, 3245), 'langchain.vectorstores.Milvus', 'Milvus', ([], {'embedding_function': 'embeddings', 'connection_args': "{'host': config.Milvus_host, 'port': config.Milvus_port, 'user': config.\n Milvus_user, 'password': config.Milvus_password}", 'collection_name': 'collection_name'}), "(embedding_function=embeddings, connection_args={'host': config.\n Milvus_host, 'port': config.Milvus_port, 'user': config.Milvus_user,\n 'password': config.Milvus_password}, collection_name=collection_name)\n", (3034, 3245), False, 'from langchain.vectorstores import Milvus\n'), ((3614, 3831), 'langchain.vectorstores.Milvus', 'Milvus', ([], {'embedding_function': 'embeddings', 'connection_args': "{'host': config.Milvus_host, 'port': config.Milvus_port, 'user': config.\n Milvus_user, 'password': config.Milvus_password}", 'collection_name': 'collection_name'}), "(embedding_function=embeddings, connection_args={'host': config.\n Milvus_host, 'port': config.Milvus_port, 'user': config.Milvus_user,\n 'password': config.Milvus_password}, collection_name=collection_name)\n", (3620, 3831), False, 'from langchain.vectorstores import Milvus\n'), ((3889, 3948), 'langchain.schema.Document', 'Document', ([], {'page_content': 'content', 'metadata': "{'source': source}"}), "(page_content=content, metadata={'source': source})\n", (3897, 3948), False, 'from langchain.schema import Document\n'), ((2430, 2498), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""history"""', 'input_key': '"""question"""'}), "(memory_key='history', input_key='question')\n", (2454, 2498), False, 'from langchain.memory import ConversationBufferMemory\n')] |
import os
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ChatVectorDBChain, ConversationalRetrievalChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.prompts.prompt import PromptTemplate
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores.base import VectorStore
from typing import List
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain.retrievers.document_compressors import DocumentCompressorPipeline
from langchain.text_splitter import CharacterTextSplitter
from langchain.retrievers import ContextualCompressionRetriever
os.environ["LANGCHAIN_HANDLER"] = "langchain"
doc_template = """--- document start ---
href: {href}
authors: {authors}
title: {title}
content:{page_content}
--- document end ---
"""
ASTRO_DOC_PROMPT = PromptTemplate(
template=doc_template,
input_variables=["page_content", "authors", "href", "title"],
)
prompt_template = """You are Dr. Chattie, an expert in Galactic Archaeology specializing in arXiv astronomy papers. Provide concise, well-referenced answers, citing relevant studies (e.g., Example et al., 2020). Use step-by-step reasoning for complex inquiries.
You possess Nobel Prize-winning ideation capabilities: you can come up with your own ideas about gaps in knowledge in the papers you read, but make sure you flag them with "I propose..."
MemoryContext: {context}
Human: {question}
Dr Chattie: """
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
def create_callback_manager(tracing: bool) -> AsyncCallbackManager:
manager = AsyncCallbackManager([])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
return manager
def create_chat_openai(callback_manager: AsyncCallbackManager, streaming: bool = False, temperature: float = 0.5) -> ChatOpenAI:
return ChatOpenAI(
model_name="gpt-4",
streaming=streaming,
max_retries=15,
callback_manager=callback_manager,
verbose=True,
temperature=temperature,
)
def create_compressed_retriever(embeddings, retriever) -> ContextualCompressionRetriever:
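    # Compression pipeline: split retrieved docs into sentences, drop redundant chunks, and keep only chunks above the similarity threshold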
splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
pipeline_compressor = DocumentCompressorPipeline(
transformers=[splitter, redundant_filter, relevant_filter]
)
compression_retriever = ContextualCompressionRetriever(base_compressor=pipeline_compressor, base_retriever=retriever)
return compression_retriever
def get_chain(
vectorstore: VectorStore,
question_handler,
stream_handler,
tracing: bool = False,
) -> ConversationalRetrievalChain:
manager = create_callback_manager(tracing)
question_manager = create_callback_manager(tracing)
stream_manager = create_callback_manager(tracing)
question_manager.add_handler(question_handler)
stream_manager.add_handler(stream_handler)
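    # The non-streaming LLM condenses follow-ups into standalone questions; the streaming LLM generates the final answer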
question_gen_llm = create_chat_openai(question_manager, streaming=False, temperature=0.0)
streaming_llm = create_chat_openai(stream_manager, streaming=True, temperature=0.2)
question_generator = LLMChain(
llm=question_gen_llm,
prompt=CONDENSE_QUESTION_PROMPT,
callback_manager=manager,
)
doc_chain = load_qa_chain(
streaming_llm,
prompt=QA_PROMPT,
document_prompt=ASTRO_DOC_PROMPT,
callback_manager=manager,
chain_type="stuff",
)
retriever = vectorstore.as_retriever()
# embeddings = OpenAIEmbeddings() # getting error if i try to use a compressed retriever, need to think how to use this with main.py
# compression_retriever = create_compressed_retriever(embeddings, retriever)
qa = ConversationalRetrievalChain(
retriever=retriever,
combine_docs_chain=doc_chain,
question_generator=question_generator,
)
return qa
| [
"langchain.chains.llm.LLMChain",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.callbacks.manager.AsyncCallbackManager",
"langchain.prompts.prompt.PromptTemplate",
"langchain.callbacks.tracers.LangChainTracer",
"langchain.retrievers.document_compressors.EmbeddingsFilter",
"langchain.chains.question_answering.load_qa_chain",
"langchain.chains.ConversationalRetrievalChain",
"langchain.retrievers.document_compressors.DocumentCompressorPipeline",
"langchain.document_transformers.EmbeddingsRedundantFilter",
"langchain.retrievers.ContextualCompressionRetriever"
] | [((1253, 1356), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'doc_template', 'input_variables': "['page_content', 'authors', 'href', 'title']"}), "(template=doc_template, input_variables=['page_content',\n 'authors', 'href', 'title'])\n", (1267, 1356), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1909, 1994), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (1923, 1994), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((2080, 2104), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (2100, 2104), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2390, 2535), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'streaming': 'streaming', 'max_retries': '(15)', 'callback_manager': 'callback_manager', 'verbose': '(True)', 'temperature': 'temperature'}), "(model_name='gpt-4', streaming=streaming, max_retries=15,\n callback_manager=callback_manager, verbose=True, temperature=temperature)\n", (2400, 2535), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2693, 2763), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(300)', 'chunk_overlap': '(0)', 'separator': '""". """'}), "(chunk_size=300, chunk_overlap=0, separator='. ')\n", (2714, 2763), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((2787, 2835), 'langchain.document_transformers.EmbeddingsRedundantFilter', 'EmbeddingsRedundantFilter', ([], {'embeddings': 'embeddings'}), '(embeddings=embeddings)\n', (2812, 2835), False, 'from langchain.document_transformers import EmbeddingsRedundantFilter\n'), ((2858, 2924), 'langchain.retrievers.document_compressors.EmbeddingsFilter', 'EmbeddingsFilter', ([], {'embeddings': 'embeddings', 'similarity_threshold': '(0.76)'}), '(embeddings=embeddings, similarity_threshold=0.76)\n', (2874, 2924), False, 'from langchain.retrievers.document_compressors import EmbeddingsFilter\n'), ((2951, 3041), 'langchain.retrievers.document_compressors.DocumentCompressorPipeline', 'DocumentCompressorPipeline', ([], {'transformers': '[splitter, redundant_filter, relevant_filter]'}), '(transformers=[splitter, redundant_filter,\n relevant_filter])\n', (2977, 3041), False, 'from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n'), ((3081, 3178), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=retriever)\n', (3111, 3178), False, 'from langchain.retrievers import ContextualCompressionRetriever\n'), ((3825, 3918), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (3833, 3918), False, 'from langchain.chains.llm import LLMChain\n'), ((3962, 4093), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'prompt': 'QA_PROMPT', 'document_prompt': 'ASTRO_DOC_PROMPT', 'callback_manager': 'manager', 'chain_type': '"""stuff"""'}), "(streaming_llm, prompt=QA_PROMPT, document_prompt=\n 
ASTRO_DOC_PROMPT, callback_manager=manager, chain_type='stuff')\n", (3975, 4093), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((4406, 4529), 'langchain.chains.ConversationalRetrievalChain', 'ConversationalRetrievalChain', ([], {'retriever': 'retriever', 'combine_docs_chain': 'doc_chain', 'question_generator': 'question_generator'}), '(retriever=retriever, combine_docs_chain=\n doc_chain, question_generator=question_generator)\n', (4434, 4529), False, 'from langchain.chains import ChatVectorDBChain, ConversationalRetrievalChain\n'), ((2138, 2155), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (2153, 2155), False, 'from langchain.callbacks.tracers import LangChainTracer\n')] |
# Import necessary libraries
import hubspot
import langchain
import openai
import streamlit
# Define function to analyze customer data using Langchain
def analyze_customer_data(customer_data):
    # NOTE: `langchain.analyze` is a placeholder call in this sketch, not a real LangChain API.
    return langchain.analyze(customer_data)  # returns analyzed data
# Define function to send personalized appointment reminders via email and text message
def send_appointment_reminder(customer_name, customer_email, customer_phone, appt_time):
    # Create message using OpenAI language model
    # (openai.generate_message is a placeholder; the real client exposes completion/chat calls)
    message = openai.generate_message(customer_name, appt_time)
    # Send email using HubSpot API (placeholder helper)
    hubspot.send_email(customer_email, message)
    # Send text message using HubSpot API (placeholder helper)
    hubspot.send_text(customer_phone, message)
# Call analyze_customer_data function on customer data
# (customer_data is assumed to be loaded elsewhere, e.g. exported from HubSpot)
analyzed_data = analyze_customer_data(customer_data)
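# Illustrative shape of the records the loop below expects (hypothetical values;
# the 'name' key is an assumption used by send_appointment_reminder):
# analyzed_data = [
#     {"name": "Jane Doe", "email": "jane@example.com", "phone": "+1-555-0100",
#      "appointment_time": "2024-05-01 10:00"},
#     {"name": "John Roe", "email": "john@example.com", "phone": "+1-555-0101",
#      "appointment_time": None},
# ]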
# Loop through customers in analyzed_data
for customer in analyzed_data:
# Check if customer has an appointment scheduled
    if customer['appointment_time'] is not None:
# Send personalized appointment reminder to customer via email and text message
        send_appointment_reminder(customer['name'], customer['email'], customer['phone'], customer['appointment_time'])  # 'name' key assumed present in the customer records
| [
"langchain.analyze"
] | [((206, 238), 'langchain.analyze', 'langchain.analyze', (['customer_data'], {}), '(customer_data)\n', (223, 238), False, 'import langchain\n'), ((499, 548), 'openai.generate_message', 'openai.generate_message', (['customer_name', 'appt_time'], {}), '(customer_name, appt_time)\n', (522, 548), False, 'import openai\n'), ((590, 633), 'hubspot.send_email', 'hubspot.send_email', (['customer_email', 'message'], {}), '(customer_email, message)\n', (608, 633), False, 'import hubspot\n'), ((682, 724), 'hubspot.send_text', 'hubspot.send_text', (['customer_phone', 'message'], {}), '(customer_phone, message)\n', (699, 724), False, 'import hubspot\n')] |
import langchain
from dotenv import load_dotenv
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from datetime import timedelta, datetime
import chainlit as cl
from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin
# Load environment variables from the .env file
load_dotenv()
langchain.debug = True
today = (datetime.now()).strftime("%Y%m%d")
yesterday = (datetime.now() - timedelta(days=1)).strftime("%Y%m%d")
today_d = (datetime.now()).strftime("%Y-%m-%d")
yesterday_d = (datetime.now() - timedelta(days=1)).strftime("%Y-%m-%d")
custom_prefix = f""" You are an agent designed to interact with a Trino SQL database.
Please do not answer unrelated questions; answer in Chinese and think step by step.
NOTE:
    data rules:
        date strings use the format YYYYMMDD: for example, today is {today}, so yesterday is {yesterday}, and so on.
        date strings may also use the format YYYY-MM-DD: for example, today is {today_d}, so yesterday is {yesterday_d}, and so on.
        the column dt is a table data version string, not a business date.
    sql rules:
        the generated SQL must not end with a semicolon; return only the SQL itself.
        derive the answer from a single SQL statement after planning the logic step by step.
You have access to the following tools:"""
@cl.on_chat_start
async def start():
# Initialize model
model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, verbose=False, streaming=True)
custom_tool_list = [CustomTrinoListTable(), CustomTrinoTableSchema(), CustomTrinoSqlQuery(), CustomTrinoSqlCheck(), CustomTrinoTableJoin()]
agent_executor = initialize_agent(
custom_tool_list,
llm=model,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
max_iterations=6,
agent_kwargs={"prefix": custom_prefix},
handle_parsing_errors="Check your output and make sure it conforms"
)
cl.user_session.set("agent", agent_executor)
# Send the initial message
elements = [
cl.Text(name="提问:", content="计算订单明细表的dt为昨天,销售日期为本月的总销售数量", display="inline"),
cl.Text(name="我能生成SQL脚本:", content=f"SELECT SUM(num) AS total_sales_quantity FROM gjdw.dw_sale_tr_goods_dt WHERE dt = '{today}' AND dates >= '2023-10-01' AND dates <= '2023-10-31'", display="inline", language="SQL"),
cl.Text(name="最终结果:", content="订单明细表的dt为昨天,销售日期为本月的总销售数量是0。", display="inline"),
]
content = "Hi,我是 Trino SQL Agent ,我能帮助你查询trino数据库。您可以向我提问,例如:"
await cl.Message(content=content, elements=elements).send()
@cl.on_message
async def main(message: cl.Message):
    agent = cl.user_session.get("agent")  # AgentExecutor
cb = cl.LangchainCallbackHandler(stream_final_answer=True)
print(message)
await cl.make_async(agent.run)(message, callbacks=[cb])
# def ask(input: str) -> str:
# print("-- Serving request for input: %s" % input)
# try:
# response = agent_executor.run(input)
# except Exception as e:
# response = str(e)
# if response.startswith("Could not parse LLM output: `"):
# response = response.removeprefix("Could not parse LLM output: `").removesuffix("`")
# return response
# agent_executor.run(" table gjdw.dw_sale_tr_goods_dt has column named bill_code ?")
# ask("计算订单明细表的dt为昨天,销售日期为本月初的总销售数量")
| [
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI"
] | [((371, 384), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (382, 384), False, 'from dotenv import load_dotenv\n'), ((1351, 1439), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'verbose': '(False)', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', temperature=0, verbose=False,\n streaming=True)\n", (1361, 1439), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1603, 1848), 'langchain.agents.initialize_agent', 'initialize_agent', (['custom_tool_list'], {'llm': 'model', 'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)', 'max_iterations': '(6)', 'agent_kwargs': "{'prefix': custom_prefix}", 'handle_parsing_errors': '"""Check your output and make sure it conforms"""'}), "(custom_tool_list, llm=model, agent=AgentType.\n ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=6,\n agent_kwargs={'prefix': custom_prefix}, handle_parsing_errors=\n 'Check your output and make sure it conforms')\n", (1619, 1848), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((1901, 1945), 'chainlit.user_session.set', 'cl.user_session.set', (['"""agent"""', 'agent_executor'], {}), "('agent', agent_executor)\n", (1920, 1945), True, 'import chainlit as cl\n'), ((2597, 2625), 'chainlit.user_session.get', 'cl.user_session.get', (['"""agent"""'], {}), "('agent')\n", (2616, 2625), True, 'import chainlit as cl\n'), ((2659, 2712), 'chainlit.LangchainCallbackHandler', 'cl.LangchainCallbackHandler', ([], {'stream_final_answer': '(True)'}), '(stream_final_answer=True)\n', (2686, 2712), True, 'import chainlit as cl\n'), ((417, 431), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (429, 431), False, 'from datetime import timedelta, datetime\n'), ((531, 545), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (543, 545), False, 'from datetime import timedelta, datetime\n'), ((1461, 1483), 'utils.custom_tools.CustomTrinoListTable', 'CustomTrinoListTable', ([], {}), '()\n', (1481, 1483), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1485, 1509), 'utils.custom_tools.CustomTrinoTableSchema', 'CustomTrinoTableSchema', ([], {}), '()\n', (1507, 1509), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1511, 1532), 'utils.custom_tools.CustomTrinoSqlQuery', 'CustomTrinoSqlQuery', ([], {}), '()\n', (1530, 1532), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1534, 1555), 'utils.custom_tools.CustomTrinoSqlCheck', 'CustomTrinoSqlCheck', ([], {}), '()\n', (1553, 1555), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((1557, 1579), 'utils.custom_tools.CustomTrinoTableJoin', 'CustomTrinoTableJoin', ([], {}), '()\n', (1577, 1579), False, 'from utils.custom_tools import CustomTrinoListTable, CustomTrinoTableSchema, CustomTrinoSqlQuery, CustomTrinoSqlCheck, CustomTrinoTableJoin\n'), ((2002, 2078), 'chainlit.Text', 'cl.Text', ([], {'name': '"""提问:"""', 'content': '"""计算订单明细表的dt为昨天,销售日期为本月的总销售数量"""', 'display': '"""inline"""'}), "(name='提问:', content='计算订单明细表的dt为昨天,销售日期为本月的总销售数量', display='inline')\n", (2009, 2078), True, 'import chainlit as cl\n'), ((2088, 2313), 
'chainlit.Text', 'cl.Text', ([], {'name': '"""我能生成SQL脚本:"""', 'content': 'f"""SELECT SUM(num) AS total_sales_quantity FROM gjdw.dw_sale_tr_goods_dt WHERE dt = \'{today}\' AND dates >= \'2023-10-01\' AND dates <= \'2023-10-31\'"""', 'display': '"""inline"""', 'language': '"""SQL"""'}), '(name=\'我能生成SQL脚本:\', content=\n f"SELECT SUM(num) AS total_sales_quantity FROM gjdw.dw_sale_tr_goods_dt WHERE dt = \'{today}\' AND dates >= \'2023-10-01\' AND dates <= \'2023-10-31\'"\n , display=\'inline\', language=\'SQL\')\n', (2095, 2313), True, 'import chainlit as cl\n'), ((2313, 2392), 'chainlit.Text', 'cl.Text', ([], {'name': '"""最终结果:"""', 'content': '"""订单明细表的dt为昨天,销售日期为本月的总销售数量是0。"""', 'display': '"""inline"""'}), "(name='最终结果:', content='订单明细表的dt为昨天,销售日期为本月的总销售数量是0。', display='inline')\n", (2320, 2392), True, 'import chainlit as cl\n'), ((465, 479), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (477, 479), False, 'from datetime import timedelta, datetime\n'), ((482, 499), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (491, 499), False, 'from datetime import timedelta, datetime\n'), ((583, 597), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (595, 597), False, 'from datetime import timedelta, datetime\n'), ((600, 617), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (609, 617), False, 'from datetime import timedelta, datetime\n'), ((2742, 2766), 'chainlit.make_async', 'cl.make_async', (['agent.run'], {}), '(agent.run)\n', (2755, 2766), True, 'import chainlit as cl\n'), ((2477, 2523), 'chainlit.Message', 'cl.Message', ([], {'content': 'content', 'elements': 'elements'}), '(content=content, elements=elements)\n', (2487, 2523), True, 'import chainlit as cl\n')] |
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
cast,
)
import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.load.dump import dumpd, dumps
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValue
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import (
ChatGeneration,
ChatResult,
LLMResult,
PromptValue,
RunInfo,
)
from langchain.schema.language_model import BaseLanguageModel, LanguageModelInput
from langchain.schema.messages import (
AIMessage,
BaseMessage,
BaseMessageChunk,
HumanMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.schema.runnable import RunnableConfig
def _get_verbosity() -> bool:
return langchain.verbose
class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
"""Base class for Chat models."""
cache: Optional[bool] = None
"""Whether to cache the response."""
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to add to the run trace."""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""Callback manager to add to the run trace."""
tags: Optional[List[str]] = Field(default=None, exclude=True)
"""Tags to add to the run trace."""
metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True)
"""Metadata to add to the run trace."""
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
# --- Runnable methods ---
def _convert_input(self, input: LanguageModelInput) -> PromptValue:
if isinstance(input, PromptValue):
return input
elif isinstance(input, str):
return StringPromptValue(text=input)
elif isinstance(input, list):
return ChatPromptValue(messages=input)
else:
raise ValueError(
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
def invoke(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> BaseMessageChunk:
config = config or {}
return cast(
BaseMessageChunk,
cast(
ChatGeneration,
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
**kwargs,
).generations[0][0],
).message,
)
async def ainvoke(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> BaseMessageChunk:
if type(self)._agenerate == BaseChatModel._agenerate:
# model doesn't implement async generation, so use default implementation
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.invoke, input, config, stop=stop, **kwargs)
)
config = config or {}
llm_result = await self.agenerate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
**kwargs,
)
return cast(
BaseMessageChunk, cast(ChatGeneration, llm_result.generations[0][0]).message
)
def stream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[BaseMessageChunk]:
if type(self)._stream == BaseChatModel._stream:
# model doesn't implement streaming, so use default implementation
yield self.invoke(input, config=config, stop=stop, **kwargs)
else:
config = config or {}
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop, **kwargs}
callback_manager = CallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
config.get("metadata"),
self.metadata,
)
(run_manager,) = callback_manager.on_chat_model_start(
dumpd(self), [messages], invocation_params=params, options=options
)
try:
message: Optional[BaseMessageChunk] = None
for chunk in self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
yield chunk.message
if message is None:
message = chunk.message
else:
message += chunk.message
assert message is not None
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
else:
run_manager.on_llm_end(
LLMResult(generations=[[ChatGeneration(message=message)]]),
)
async def astream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[BaseMessageChunk]:
if type(self)._astream == BaseChatModel._astream:
# model doesn't implement streaming, so use default implementation
yield self.invoke(input, config=config, stop=stop, **kwargs)
else:
config = config or {}
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop, **kwargs}
callback_manager = AsyncCallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
config.get("metadata"),
self.metadata,
)
(run_manager,) = await callback_manager.on_chat_model_start(
dumpd(self), [messages], invocation_params=params, options=options
)
try:
message: Optional[BaseMessageChunk] = None
async for chunk in self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
yield chunk.message
if message is None:
message = chunk.message
else:
message += chunk.message
assert message is not None
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e)
raise e
else:
await run_manager.on_llm_end(
LLMResult(generations=[[ChatGeneration(message=message)]]),
)
# --- Custom methods ---
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
return {}
def _get_invocation_params(
self,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> dict:
params = self.dict()
params["stop"] = stop
return {**params, **kwargs}
def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
if self.lc_serializable:
params = {**kwargs, **{"stop": stop}}
param_string = str(sorted([(k, v) for k, v in params.items()]))
llm_string = dumps(self)
return llm_string + "---" + param_string
else:
params = self._get_invocation_params(stop=stop, **kwargs)
params = {**params, **kwargs}
return str(sorted([(k, v) for k, v in params.items()]))
def generate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
"""Top Level call"""
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop}
callback_manager = CallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
run_managers = callback_manager.on_chat_model_start(
dumpd(self), messages, invocation_params=params, options=options
)
results = []
for i, m in enumerate(messages):
try:
results.append(
self._generate_with_cache(
m,
stop=stop,
run_manager=run_managers[i] if run_managers else None,
**kwargs,
)
)
except (KeyboardInterrupt, Exception) as e:
if run_managers:
run_managers[i].on_llm_error(e)
raise e
flattened_outputs = [
LLMResult(generations=[res.generations], llm_output=res.llm_output)
for res in results
]
llm_output = self._combine_llm_outputs([res.llm_output for res in results])
generations = [res.generations for res in results]
output = LLMResult(generations=generations, llm_output=llm_output)
if run_managers:
run_infos = []
for manager, flattened_output in zip(run_managers, flattened_outputs):
manager.on_llm_end(flattened_output)
run_infos.append(RunInfo(run_id=manager.run_id))
output.run = run_infos
return output
async def agenerate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
"""Top Level call"""
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop}
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
run_managers = await callback_manager.on_chat_model_start(
dumpd(self), messages, invocation_params=params, options=options
)
results = await asyncio.gather(
*[
self._agenerate_with_cache(
m,
stop=stop,
run_manager=run_managers[i] if run_managers else None,
**kwargs,
)
for i, m in enumerate(messages)
],
return_exceptions=True,
)
exceptions = []
for i, res in enumerate(results):
if isinstance(res, Exception):
if run_managers:
await run_managers[i].on_llm_error(res)
exceptions.append(res)
if exceptions:
if run_managers:
await asyncio.gather(
*[
run_manager.on_llm_end(
LLMResult(
generations=[res.generations], llm_output=res.llm_output
)
)
for run_manager, res in zip(run_managers, results)
if not isinstance(res, Exception)
]
)
raise exceptions[0]
flattened_outputs = [
LLMResult(generations=[res.generations], llm_output=res.llm_output)
for res in results
]
llm_output = self._combine_llm_outputs([res.llm_output for res in results])
generations = [res.generations for res in results]
output = LLMResult(generations=generations, llm_output=llm_output)
await asyncio.gather(
*[
run_manager.on_llm_end(flattened_output)
for run_manager, flattened_output in zip(
run_managers, flattened_outputs
)
]
)
if run_managers:
output.run = [
RunInfo(run_id=run_manager.run_id) for run_manager in run_managers
]
return output
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = [p.to_messages() for p in prompts]
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = [p.to_messages() for p in prompts]
return await self.agenerate(
prompt_messages, stop=stop, callbacks=callbacks, **kwargs
)
def _generate_with_cache(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if new_arg_supported:
return self._generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
return self._generate(messages, stop=stop, **kwargs)
else:
llm_string = self._get_llm_string(stop=stop, **kwargs)
prompt = dumps(messages)
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
return ChatResult(generations=cache_val)
else:
if new_arg_supported:
result = self._generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
result = self._generate(messages, stop=stop, **kwargs)
langchain.llm_cache.update(prompt, llm_string, result.generations)
return result
async def _agenerate_with_cache(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if new_arg_supported:
return await self._agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
return await self._agenerate(messages, stop=stop, **kwargs)
else:
llm_string = self._get_llm_string(stop=stop, **kwargs)
prompt = dumps(messages)
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
return ChatResult(generations=cache_val)
else:
if new_arg_supported:
result = await self._agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
result = await self._agenerate(messages, stop=stop, **kwargs)
langchain.llm_cache.update(prompt, llm_string, result.generations)
return result
@abstractmethod
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
raise NotImplementedError()
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
raise NotImplementedError()
def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
raise NotImplementedError()
def __call__(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseMessage:
generation = self.generate(
[messages], stop=stop, callbacks=callbacks, **kwargs
).generations[0][0]
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
async def _call_async(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseMessage:
result = await self.agenerate(
[messages], stop=stop, callbacks=callbacks, **kwargs
)
generation = result.generations[0][0]
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
def call_as_llm(
self, message: str, stop: Optional[List[str]] = None, **kwargs: Any
) -> str:
return self.predict(message, stop=stop, **kwargs)
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
return result.content
def predict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(messages, stop=_stop, **kwargs)
async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = await self._call_async(
[HumanMessage(content=text)], stop=_stop, **kwargs
)
return result.content
async def apredict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(messages, stop=_stop, **kwargs)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {}
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of chat model."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
class SimpleChatModel(BaseChatModel):
"""Simple Chat Model."""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@abstractmethod
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Simpler interface."""
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
func = partial(
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
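# --- Illustrative only (not part of this module): a minimal SimpleChatModel subclass. ---
# A concrete chat model needs `_call`, which turns the incoming messages into a reply
# string, plus the `_llm_type` property; the echo behaviour below is purely an
# assumption used for demonstration.
#
# class EchoChatModel(SimpleChatModel):
#     """Chat model that simply echoes the last human message."""
#
#     def _call(
#         self,
#         messages: List[BaseMessage],
#         stop: Optional[List[str]] = None,
#         run_manager: Optional[CallbackManagerForLLMRun] = None,
#         **kwargs: Any,
#     ) -> str:
#         return f"You said: {messages[-1].content}"
#
#     @property
#     def _llm_type(self) -> str:
#         return "echo-chat"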
| [
"langchain.llm_cache.lookup",
"langchain.schema.messages.HumanMessage",
"langchain.prompts.base.StringPromptValue",
"langchain.schema.messages.AIMessage",
"langchain.schema.ChatGeneration",
"langchain.load.dump.dumps",
"langchain.schema.RunInfo",
"langchain.llm_cache.update",
"langchain.pydantic_v1.Field",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.ChatResult",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.pydantic_v1.root_validator",
"langchain.schema.LLMResult",
"langchain.load.dump.dumpd",
"langchain.prompts.chat.ChatPromptValue"
] | [((1364, 1401), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1369, 1401), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1475, 1508), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1480, 1508), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1608, 1641), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1613, 1641), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1726, 1759), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1731, 1759), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1841, 1874), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1846, 1874), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1925, 1941), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (1939, 1941), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((9835, 9947), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose, tags,\n self.tags, metadata, self.metadata)\n', (9860, 9947), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((11036, 11093), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (11045, 11093), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((11869, 11986), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose,\n tags, self.tags, metadata, self.metadata)\n', (11899, 11986), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((13721, 13778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (13730, 13778), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22871, 22900), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {'content': 'output_str'}), '(content=output_str)\n', (22880, 22900), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((22922, 22953), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (22936, 22953), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22969, 23005), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': '[generation]'}), '(generations=[generation])\n', (22979, 23005), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, 
PromptValue, RunInfo\n'), ((23517, 23596), 'functools.partial', 'partial', (['self._generate', 'messages'], {'stop': 'stop', 'run_manager': 'run_manager'}), '(self._generate, messages, stop=stop, run_manager=run_manager, **kwargs)\n', (23524, 23596), False, 'from functools import partial\n'), ((2132, 2234), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2145, 2234), False, 'import warnings\n'), ((9118, 9129), 'langchain.load.dump.dumps', 'dumps', (['self'], {}), '(self)\n', (9123, 9129), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10112, 10123), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (10117, 10123), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10767, 10834), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (10776, 10834), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13452, 13519), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13461, 13519), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16027, 16042), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (16032, 16042), False, 'from langchain.load.dump import dumpd, dumps\n'), ((16067, 16113), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (16093, 16113), False, 'import langchain\n'), ((17747, 17762), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (17752, 17762), False, 'from langchain.load.dump import dumpd, dumps\n'), ((17787, 17833), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (17813, 17833), False, 'import langchain\n'), ((2713, 2742), 'langchain.prompts.base.StringPromptValue', 'StringPromptValue', ([], {'text': 'input'}), '(text=input)\n', (2730, 2742), False, 'from langchain.prompts.base import StringPromptValue\n'), ((4629, 4679), 'typing.cast', 'cast', (['ChatGeneration', 'llm_result.generations[0][0]'], {}), '(ChatGeneration, llm_result.generations[0][0])\n', (4633, 4679), False, 'from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, cast\n'), ((5755, 5766), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (5760, 5766), False, 'from langchain.load.dump import dumpd, dumps\n'), ((12158, 12169), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (12163, 12169), False, 'from langchain.load.dump import dumpd, dumps\n'), ((14101, 14135), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (14108, 14135), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16181, 16214), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (16191, 16214), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16530, 16596), 'langchain.llm_cache.update', 
'langchain.llm_cache.update', (['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (16556, 16596), False, 'import langchain\n'), ((17901, 17934), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (17911, 17934), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((18264, 18330), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (18290, 18330), False, 'import langchain\n'), ((20898, 20924), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (20910, 20924), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((2800, 2831), 'langchain.prompts.chat.ChatPromptValue', 'ChatPromptValue', ([], {'messages': 'input'}), '(messages=input)\n', (2815, 2831), False, 'from langchain.prompts.chat import ChatPromptValue\n'), ((4200, 4256), 'functools.partial', 'partial', (['self.invoke', 'input', 'config'], {'stop': 'stop'}), '(self.invoke, input, config, stop=stop, **kwargs)\n', (4207, 4256), False, 'from functools import partial\n'), ((7657, 7668), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (7662, 7668), False, 'from langchain.load.dump import dumpd, dumps\n'), ((11315, 11345), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'manager.run_id'}), '(run_id=manager.run_id)\n', (11322, 11345), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((15194, 15227), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (15211, 15227), False, 'import inspect\n'), ((16899, 16933), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (16916, 16933), False, 'import inspect\n'), ((21574, 21600), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (21586, 21600), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((23640, 23664), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (23662, 23664), False, 'import asyncio\n'), ((4134, 4160), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (4158, 4160), False, 'import asyncio\n'), ((6521, 6552), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (6535, 6552), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13049, 13116), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13058, 13116), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((8442, 8473), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (8456, 8473), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n')] |
import sys
import chromadb
import pandas
import sqlite3
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.document_loaders import TextLoader
from langchain.document_loaders import WikipediaLoader
from langchain.retrievers.multi_query import MultiQueryRetriever
import langchain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.cache import InMemoryCache
from langchain import PromptTemplate
import os
import openai
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import AIMessage, HumanMessage, SystemMessage
os.environ["OPENAI_API_KEY"] = "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"
openai.api_key = "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"
api_key = "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"
llm = OpenAI()
# chat = ChatOpenAI(openai_api_key=api_key, temperature=0)
embedding_function = OpenAIEmbeddings()
def us_constitution_helper(question):
loader = TextLoader("some_data/US_Constitution.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=500)
docs = text_splitter.split_documents(documents)
_embedding_function = OpenAIEmbeddings()
db = Chroma.from_documents(
docs, _embedding_function, persist_directory="./US_Constitution"
)
db.persist()
chat = ChatOpenAI(openai_api_key=api_key, temperature=0)
compressor = LLMChainExtractor.from_llm(chat)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=db.as_retriever()
)
compressed_docs = compression_retriever.get_relevant_documents(question)
return compressed_docs[0].page_content
print(us_constitution_helper("What is the 13th Amendment?"))
| [
"langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
"langchain.chat_models.ChatOpenAI",
"langchain.vectorstores.Chroma.from_documents",
"langchain.llms.OpenAI",
"langchain.retrievers.document_compressors.LLMChainExtractor.from_llm",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.document_loaders.TextLoader"
] | [((1247, 1255), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1253, 1255), False, 'from langchain.llms import OpenAI\n'), ((1336, 1354), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1352, 1354), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1408, 1451), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""some_data/US_Constitution.txt"""'], {}), "('some_data/US_Constitution.txt')\n", (1418, 1451), False, 'from langchain.document_loaders import TextLoader\n'), ((1503, 1562), 'langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder', 'CharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': '(500)'}), '(chunk_size=500)\n', (1546, 1562), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1642, 1660), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1658, 1660), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1670, 1762), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['docs', '_embedding_function'], {'persist_directory': '"""./US_Constitution"""'}), "(docs, _embedding_function, persist_directory=\n './US_Constitution')\n", (1691, 1762), False, 'from langchain.vectorstores import Chroma\n'), ((1801, 1850), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'temperature': '(0)'}), '(openai_api_key=api_key, temperature=0)\n', (1811, 1850), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1868, 1900), 'langchain.retrievers.document_compressors.LLMChainExtractor.from_llm', 'LLMChainExtractor.from_llm', (['chat'], {}), '(chat)\n', (1894, 1900), False, 'from langchain.retrievers.document_compressors import LLMChainExtractor\n')] |
import os
import sys
module_path = ".."
sys.path.append(os.path.abspath(module_path))
import langchain
from langchain.document_loaders import ConfluenceLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.embeddings import BedrockEmbeddings
from langchain.llms.bedrock import Bedrock
from langchain.vectorstores import FAISS
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from utils import bedrock
boto3_bedrock = bedrock.get_bedrock_client(
assumed_role=os.environ.get("BEDROCK_ASSUME_ROLE", None),
endpoint_url=os.environ.get("BEDROCK_ENDPOINT_URL", None),
region=os.environ.get("AWS_DEFAULT_REGION", None),
)
class BedrockConfluenceQA:
def __init__(self, config: dict = {}):
self.config = config
self.embedding = None
self.vectordb = None
self.llm = None
self.qa = None
self.retriever = None
self.model_id = None
def init_embeddings(self) -> None:
# AWS Bedrock Embeddings
self.embedding = BedrockEmbeddings(client=boto3_bedrock)
def init_models(self, parameters: dict = {}) -> None:
self.parameters = parameters
max_token_count = self.parameters.get("max_token_count", 512)
        temperature = self.parameters.get("temprature", 1)  # parameter key spelling kept for backwards compatibility
top_p = self.parameters.get("top_p", 1)
top_k = self.parameters.get("top_k", 1)
model_id = self.parameters.get("model_id", "amazon.titan-tg1-large")
self.model_id = model_id
# AWS Bedrock titan
if "claude" in model_id:
self.llm = Bedrock(
model_id=model_id,
client=boto3_bedrock,
model_kwargs={
"max_tokens_to_sample":max_token_count,
"temperature": temprature,
"top_k": top_k,
"top_p": top_p,
}
)
if "titan" in model_id:
self.llm = Bedrock(model_id=model_id, client=boto3_bedrock, model_kwargs= {
"maxTokenCount": max_token_count,
"temperature": temprature,
"topP": top_p,
})
if "ai21" in model_id:
self.llm = Bedrock(model_id=model_id, client=boto3_bedrock, model_kwargs= {
"maxTokens": max_token_count,
"temperature": temprature,
"topP": top_p,
})
def vector_db_confluence_docs(self, force_reload: bool = False) -> None:
"""
        Creates a vector db for the embeddings and persists it, or loads a vector db from the persist directory.
"""
persist_directory = self.config.get("persist_directory", None)
confluence_url = self.config.get("confluence_url", None)
username = self.config.get("username", None)
api_key = self.config.get("api_key", None)
space_key = self.config.get("space_key", None)
if persist_directory and os.path.exists(persist_directory) and not force_reload:
## Load from the persist db
self.vectordb = FAISS.load_local("faiss_index", embeddings=self.embedding)
else:
loader = ConfluenceLoader(
url=confluence_url, username=username, api_key=api_key
)
documents = loader.load(space_key=space_key, limit=50)
## 2. Split the texts
text_splitter = RecursiveCharacterTextSplitter(
                # chunk_size is measured in characters, not tokens.
                # Keep chunks small enough to stay under the Titan text embeddings token limit (512 tokens).
chunk_size=1000,
chunk_overlap=100,
# separators=["\n", "\n\n"]
)
docs = text_splitter.split_documents(documents)
print(len(docs))
## 3. Create Embeddings and add to chroma store
##TODO: Validate if self.embedding is not None
vectorstore_faiss = FAISS.from_documents(
docs,
self.embedding,
)
VectorStoreIndexWrapper(vectorstore=vectorstore_faiss)
self.vectordb = vectorstore_faiss
# vectorstore_faiss_aws.save_local("faiss_index")
def retreival_qa_chain(self):
"""
        Creates a retrieval QA chain using the vectordb as retriever and the LLM to complete the prompt.
"""
##TODO: Use custom prompt
self.retriever = self.vectordb.as_retriever(search_kwargs={"k": 10})
# self.qa = RetrievalQA.from_chain_type(llm=self.llm, chain_type="stuff",retriever=self.retriever)
# prompt_template = """Human: Use the following pieces of context to provide a concise answer to the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
# {context}
# Question: {question}
# Assistant:"""
# prompt_template = """Human: Please use the context below to craft a succinct response to the following question. If you don't have the information, it's okay to state that you're unaware instead of inventing an answer.
# {context}
# Question: {question}
# Assistant:"""
prompt_template = """Human: Utilize the context provided to formulate a comprehensive response to the following question. If you're uncertain about the answer, it's perfectly fine to acknowledge that you're unsure rather than providing speculative information.
{context}
Question: {question}
Assistant:"""
## used for the bulk answers generation
prompt_template = """# INSTRUCTION
Answer any question about onboarding or company-related topics at LogicWorks acting as a onboarding manager. If you don't have the information, it's okay to state that you're unaware instead of inventing an answer.
Utilize the context provided to formulate a comprehensive response to the following question. If you don't have the information, it's okay to state that you're unaware instead of inventing an answer.
# CONTEXT
{context}
# QUESTION
{question}
Assistant:
"""
prompt_template = """User: Answer the question based only on the information provided between ##. If you don't know the answer, just say that you don't know, don't try to make up an answer.
#
{context}
#
Question: {question}
Assistant:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
self.qa = RetrievalQA.from_chain_type(
llm=self.llm,
chain_type="stuff",
retriever=self.retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": PROMPT},
)
def answer_confluence(self, question: str) -> str:
"""
Answer the question
"""
answer = self.qa({"query": question})
return answer
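# --- Illustrative usage sketch (not part of the original module). ---
# Config keys and parameter values below mirror what the class reads above; the
# Confluence URL, credentials and space key are placeholders that must be replaced.
#
# config = {
#     "persist_directory": None,
#     "confluence_url": "https://example.atlassian.net/wiki",
#     "username": "user@example.com",
#     "api_key": "<confluence-api-token>",
#     "space_key": "ONBOARD",
# }
# confluence_qa = BedrockConfluenceQA(config=config)
# confluence_qa.init_embeddings()
# confluence_qa.init_models({"model_id": "amazon.titan-tg1-large", "max_token_count": 512})
# confluence_qa.vector_db_confluence_docs()
# confluence_qa.retreival_qa_chain()
# print(confluence_qa.answer_confluence("How do I request VPN access?")["result"])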
| [
"langchain.llms.bedrock.Bedrock",
"langchain.document_loaders.ConfluenceLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.prompts.PromptTemplate",
"langchain.embeddings.BedrockEmbeddings",
"langchain.vectorstores.FAISS.from_documents",
"langchain.indexes.vectorstore.VectorStoreIndexWrapper",
"langchain.vectorstores.FAISS.load_local",
"langchain.chains.RetrievalQA.from_chain_type"
] | [((58, 86), 'os.path.abspath', 'os.path.abspath', (['module_path'], {}), '(module_path)\n', (73, 86), False, 'import os\n'), ((606, 649), 'os.environ.get', 'os.environ.get', (['"""BEDROCK_ASSUME_ROLE"""', 'None'], {}), "('BEDROCK_ASSUME_ROLE', None)\n", (620, 649), False, 'import os\n'), ((668, 712), 'os.environ.get', 'os.environ.get', (['"""BEDROCK_ENDPOINT_URL"""', 'None'], {}), "('BEDROCK_ENDPOINT_URL', None)\n", (682, 712), False, 'import os\n'), ((725, 767), 'os.environ.get', 'os.environ.get', (['"""AWS_DEFAULT_REGION"""', 'None'], {}), "('AWS_DEFAULT_REGION', None)\n", (739, 767), False, 'import os\n'), ((1135, 1174), 'langchain.embeddings.BedrockEmbeddings', 'BedrockEmbeddings', ([], {'client': 'boto3_bedrock'}), '(client=boto3_bedrock)\n', (1152, 1174), False, 'from langchain.embeddings import BedrockEmbeddings\n'), ((6643, 6728), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (6657, 6728), False, 'from langchain.prompts import PromptTemplate\n'), ((6770, 6935), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'self.llm', 'chain_type': '"""stuff"""', 'retriever': 'self.retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': PROMPT}"}), "(llm=self.llm, chain_type='stuff', retriever=\n self.retriever, return_source_documents=True, chain_type_kwargs={\n 'prompt': PROMPT})\n", (6797, 6935), False, 'from langchain.chains import RetrievalQA\n'), ((1689, 1861), 'langchain.llms.bedrock.Bedrock', 'Bedrock', ([], {'model_id': 'model_id', 'client': 'boto3_bedrock', 'model_kwargs': "{'max_tokens_to_sample': max_token_count, 'temperature': temprature,\n 'top_k': top_k, 'top_p': top_p}"}), "(model_id=model_id, client=boto3_bedrock, model_kwargs={\n 'max_tokens_to_sample': max_token_count, 'temperature': temprature,\n 'top_k': top_k, 'top_p': top_p})\n", (1696, 1861), False, 'from langchain.llms.bedrock import Bedrock\n'), ((2068, 2217), 'langchain.llms.bedrock.Bedrock', 'Bedrock', ([], {'model_id': 'model_id', 'client': 'boto3_bedrock', 'model_kwargs': "{'maxTokenCount': max_token_count, 'temperature': temprature, 'topP': top_p}"}), "(model_id=model_id, client=boto3_bedrock, model_kwargs={\n 'maxTokenCount': max_token_count, 'temperature': temprature, 'topP': top_p}\n )\n", (2075, 2217), False, 'from langchain.llms.bedrock import Bedrock\n'), ((2326, 2465), 'langchain.llms.bedrock.Bedrock', 'Bedrock', ([], {'model_id': 'model_id', 'client': 'boto3_bedrock', 'model_kwargs': "{'maxTokens': max_token_count, 'temperature': temprature, 'topP': top_p}"}), "(model_id=model_id, client=boto3_bedrock, model_kwargs={'maxTokens':\n max_token_count, 'temperature': temprature, 'topP': top_p})\n", (2333, 2465), False, 'from langchain.llms.bedrock import Bedrock\n'), ((3068, 3101), 'os.path.exists', 'os.path.exists', (['persist_directory'], {}), '(persist_directory)\n', (3082, 3101), False, 'import os\n'), ((3192, 3250), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['"""faiss_index"""'], {'embeddings': 'self.embedding'}), "('faiss_index', embeddings=self.embedding)\n", (3208, 3250), False, 'from langchain.vectorstores import FAISS\n'), ((3286, 3358), 'langchain.document_loaders.ConfluenceLoader', 'ConfluenceLoader', ([], {'url': 'confluence_url', 'username': 'username', 'api_key': 'api_key'}), '(url=confluence_url, username=username, api_key=api_key)\n', 
(3302, 3358), False, 'from langchain.document_loaders import ConfluenceLoader\n'), ((3518, 3584), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)'}), '(chunk_size=1000, chunk_overlap=100)\n', (3548, 3584), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((4079, 4121), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'self.embedding'], {}), '(docs, self.embedding)\n', (4099, 4121), False, 'from langchain.vectorstores import FAISS\n'), ((4181, 4235), 'langchain.indexes.vectorstore.VectorStoreIndexWrapper', 'VectorStoreIndexWrapper', ([], {'vectorstore': 'vectorstore_faiss'}), '(vectorstore=vectorstore_faiss)\n', (4204, 4235), False, 'from langchain.indexes.vectorstore import VectorStoreIndexWrapper\n')] |
import pickle
import torch
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate,)
import numpy as np
import random
np.int = int #fixing shap/numpy compatibility issue
from sklearn.metrics import classification_report
import shap
from matplotlib import pyplot as plt
from langchain.chains import LLMChain
from lime_stability.stability import LimeTabularExplainerOvr
import argparse
import pandas as pd
from sklearn.model_selection import train_test_split
import pathlib
import langchain
from langchain.output_parsers.enum import EnumOutputParser
from enum import Enum
from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter
#cf. https://stackoverflow.com/a/53304527/5899161
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
import fastshap
from torch import nn
import dice_ml
from anchor import anchor_tabular
from langchain.llms import VLLM
from SALib.sample import morris as morris_sample
from SALib.test_functions import Ishigami
from SALib.analyze import morris as morris_analyze
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage
import tqdm
#langchain.verbose=True
def vicuna15(temperature=.7):
model = "vicuna"
llm = ChatOpenAI(model_name=model, openai_api_key="EMPTY", openai_api_base="http://localhost:8000/v1", max_tokens=150, verbose=True, temperature=temperature)
return llm
def llama2(temperature=.4):
model = "llama2"
llm = ChatOpenAI(model_name=model, openai_api_key="EMPTY", openai_api_base="http://localhost:8000/v1", max_tokens=150,
temperature=temperature)
return llm
def llama2_hf_70b(temperature = .4):
#cf. https://www.pinecone.io/learn/llama-2/
import torch
import transformers
from langchain.llms import HuggingFacePipeline
bnb_config = transformers.BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=torch.bfloat16
)
model_id = "/lu/tetyda/home/lgorski/llama/llama-2-70b-chat-hf/models--meta-llama--Llama-2-70b-chat-hf/snapshots/36d9a7388cc80e5f4b3e9701ca2f250d21a96c30/"
model_config = transformers.AutoConfig.from_pretrained(model_id)
model = transformers.AutoModelForCausalLM.from_pretrained(
model_id,
config=model_config,
quantization_config=bnb_config,
device_map = "auto")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
generate_text = transformers.pipeline(
model=model, tokenizer=tokenizer, task="text-generation",
temperature=temperature,
max_new_tokens=150,
repetition_penalty=1.1
)
llm = HuggingFacePipeline(pipeline=generate_text)
return llm
def gpt4_azure(temperature=.3):
import json
import os
with open("gpt4.json", encoding="utf-8") as credentials_file:
credentials = json.load(credentials_file)
llm = AzureChatOpenAI(
openai_api_base=credentials["OPENAI_API_BASE"],
openai_api_version=credentials["OPENAI_API_VERSION"],
deployment_name="test-gpt4-32k",
openai_api_key=credentials["OPENAI_API_KEY"],
openai_api_type=credentials["OPENAI_API_TYPE"],
max_tokens=150,
temperature=temperature,
)
return llm
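# The keys below mirror what gpt4_azure() reads from gpt4.json; the values are illustrative
# placeholders added by the editor, not real configuration:
# {
#   "OPENAI_API_BASE": "https://<your-resource>.openai.azure.com/",
#   "OPENAI_API_VERSION": "2023-05-15",
#   "OPENAI_API_KEY": "<azure-openai-api-key>",
#   "OPENAI_API_TYPE": "azure"
# }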
def grouper(iterable, n):
for i in range(0, len(iterable), n):
yield iterable[i:i+n]
#returns a list of violated rules
def predict_rules_only(X, features_closure, encoder : defaultdict):
def analyze_rules(X):
features = features_closure.tolist()
violated = []
#r1 (gender = f and age >= 60) or (gender = male and age >= 65)
gender_idx = features.index("gender")
age_idx = features.index("age")
gender = encoder[gender_idx].inverse_transform([X[gender_idx]])[0]
age = X[age_idx]
if not((gender == "f" and X[age_idx] >= 60) or (gender == "m" and age >= 65)):
violated.append(1)
        # r2: at least four of the following features are "yes": paid_contribution_1, paid_contribution_2, paid_contribution_3, paid_contribution_4, paid_contribution_5
paid_contribution_1_idx = features.index("paid_contribution_1")
paid_contribution_2_idx = features.index("paid_contribution_2")
paid_contribution_3_idx = features.index("paid_contribution_3")
paid_contribution_4_idx = features.index("paid_contribution_4")
paid_contribution_5_idx = features.index("paid_contribution_5")
paid_contribution_1 = encoder[paid_contribution_1_idx].inverse_transform([X[paid_contribution_1_idx]])[0]
paid_contribution_2 = encoder[paid_contribution_2_idx].inverse_transform([X[paid_contribution_2_idx]])[0]
paid_contribution_3 = encoder[paid_contribution_3_idx].inverse_transform([X[paid_contribution_3_idx]])[0]
paid_contribution_4 = encoder[paid_contribution_4_idx].inverse_transform([X[paid_contribution_4_idx]])[0]
paid_contribution_5 = encoder[paid_contribution_5_idx].inverse_transform([X[paid_contribution_5_idx]])[0]
paid_contributions = sum([1 if elem == "yes" else 0 for elem in [paid_contribution_1, paid_contribution_2, paid_contribution_3, paid_contribution_4, paid_contribution_5]])
if not (paid_contributions >= 4):
violated.append(2)
        # r3: is_spouse=yes
is_spouse_idx = features.index("is_spouse")
is_spouse = encoder[is_spouse_idx].inverse_transform([X[is_spouse_idx]])[0] == "True"
if not (is_spouse == True):
violated.append(3)
#r4 is_absent=no
is_absent_idx = features.index("is_absent")
is_absent = encoder[is_absent_idx].inverse_transform([X[is_absent_idx]])[0] == "True"
if not (is_absent == False):
violated.append(4)
#r5 it is not true that capital_resources >= 3000
capital_resources_idx = features.index("capital_resources")
capital_resources = X[capital_resources_idx]
if capital_resources >= 3000:
violated.append(5)
# r6: (patient_type= in and distance_to_hospital < 50) or (patient_type=out and distance_to_hospital >= 50)
patient_type_idx = features.index("patient_type")
distance_to_hospital_idx = features.index("distance_to_hospital")
patient_type = encoder[patient_type_idx].inverse_transform([X[patient_type_idx]])[0]
distance_to_hospital = X[distance_to_hospital_idx]
if not ((patient_type == "in" and distance_to_hospital < 50) or (patient_type == "out" and distance_to_hospital >= 50)):
violated.append(6)
return violated
def inner(X, violated_rules=None):
if violated_rules==None:
violated_rules=[]
result = []
for row in X:
violated = analyze_rules(row)
violated_rules.append(violated)
if len(violated) == 0:
result.append(1)
else:
result.append(0)
return np.array(result)
return inner
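# Minimal usage sketch (added illustration, not part of the original pipeline): run the hand-coded
# rule checker over an already label-encoded feature matrix and collect the rules violated per row.
def _example_rule_check(X_encoded, columns, encoding):
    violations = []
    rule_predict = predict_rules_only(X_encoded, columns, encoding)
    labels = rule_predict(X_encoded, violations)  # 1 = all rules satisfied, 0 = at least one violated
    return labels, violations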
def predict_rules_simplified_only(X, features_closure, encoder : defaultdict):
def analyze_rules(X):
features = features_closure.tolist()
violated = []
#r1 (gender = f and age >= 60) or (gender = male and age >= 65)
gender_idx = features.index("gender")
age_idx = features.index("age")
gender = encoder[gender_idx].inverse_transform([X[gender_idx]])[0]
age = X[age_idx]
if not((gender == "f" and X[age_idx] >= 60) or (gender == "m" and age >= 65)):
violated.append(1)
# r2: (patient_type= in and distance_to_hospital < 50) or (patient_type=out and distance_to_hospital >= 50)
patient_type_idx = features.index("patient_type")
distance_to_hospital_idx = features.index("distance_to_hospital")
patient_type = encoder[patient_type_idx].inverse_transform([X[patient_type_idx]])[0]
distance_to_hospital = X[distance_to_hospital_idx]
if not ((patient_type == "in" and distance_to_hospital < 50) or (patient_type == "out" and distance_to_hospital >= 50)):
violated.append(2)
return violated
def inner(X, violated_rules=None):
if violated_rules==None:
violated_rules=[]
result = []
for row in X:
violated = analyze_rules(row)
violated_rules.append(violated)
if len(violated) == 0:
result.append(1)
else:
result.append(0)
return np.array(result)
return inner
import time
def predict_rules(chain, features, encoder : defaultdict, configuration=None, save_reply=False, memory={}):
def predict_rules_inner(X : np.ndarray, output = None):
results = []
widgets = [' [', Percentage(), '] ', Bar(), ' (', Timer(), ') ', ETA(), ' ', Counter(), ' of ', str(len(X))]
pbar = ProgressBar(widgets=widgets, maxval=len(X)).start()
counter = 0
X=np.array(X, dtype=object)
for index, encoding in encoder.items():
inversed = encoding.inverse_transform(X[:, index].astype(int))
X[:, index] = inversed
for row in X:
if row.tobytes() in memory:
classification = memory[row.tobytes()]
else:
text=",".join([str(elem) for elem in row])
classification = chain(text)
if output is not None:
output += [classification]
if save_reply:
with open(f"log_{configuration.model_factory}.txt", "a") as log:
                    log.write(str(classification) + "\n")  # chain() returns a dict, so stringify before writing
classification=classification["text"]
cleaned = classification.strip().replace(".", "").lower()
if "granted" in cleaned:
results.append(1)
elif "denied" in cleaned:
results.append(0)
else: #answer not fitting the template
results.append(2)
counter += 1
pbar.update(counter)
if configuration.throttle:
time.sleep(configuration.throttle)
pbar.finish()
return np.array(results)
return predict_rules_inner
# def lime_explainer(train, test, predict, feature_names, encoder):
# categorical_features = list(sorted(encoder.keys()))
# categorical_names = { key : list(encoder[key].classes_) for key in categorical_features}
# explainer = LimeTabularExplainerOvr(train, feature_names=feature_names, categorical_features=categorical_features,
# categorical_names=categorical_names,
# class_names=["not granted", "granted", "unknown"])
# print(explainer.explain_instance(np.array(test[0]), predict).as_list())
import random
def shap_explainer(train, test, y_train, y_test, predict, features, encoder, configuration):
explainer = shap.KernelExplainer(model=predict, data=shap.sample(train, 100))
#test=pd.read_csv(r"data/welfare_dataset/DiscoveringTheRationaleOfDecisions/datasets/confused_gpt4.csv").drop(columns=["eligible"])
shap_values = explainer.shap_values(test)
if configuration.saveout:
np.save(f'shap_values_{configuration.model_factory}.npy', shap_values)
print(shap_values)
#shap.summary_plot(shap_values, show=False, feature_names=features, class_names=["not granted", "granted", "unknown"])
#plt.savefig('vis.png')
def morris_explainer(train, test, y_train, y_test, predict, features, encoder, configuration):
from interpret.blackbox import MorrisSensitivity
msa = MorrisSensitivity(predict, test, feature_names=features, num_resamples=10, num_levels=2)
print(msa)
def anchor_explainer(train, test, y_train, y_test, predict, features, encoder, configuration):
explainer = anchor_tabular.AnchorTabularExplainer(
["not granted", "granted", "unknown"],
features,
train,
{ key: value.classes_ for key, value in encoder.items() })
explanations = []
for test_instance_idx in range(test.shape[0]):
print (f"calculating {test_instance_idx} of {len(test)}")
explanation = explainer.explain_instance(test[test_instance_idx], predict, threshold=0.95)
print("Anchor: %s" % (' AND '.join(explanation.names())))
print('Precision: %.2f' % explanation.precision())
print('Coverage: %.2f' % explanation.coverage())
explanations.append(explanation)
if configuration.saveout:
with open("anchor_explanations.pkl", "wb") as output:
pickle.dump(explanations, output)
def counterfactual_explainer(train, test, y_train, y_test, predict, features, encoder, configuration):
pd_train_x = pd.DataFrame(train, columns=features)
pd_train_y = pd.DataFrame(y_train, columns=["eligible"])
pd_test_x = pd.DataFrame(test, columns=features)
pd_test_y = pd.DataFrame(y_test, columns=["eligible"])
dice_data = pd.concat([pd_train_x, pd_train_y], axis=1, join='inner')
dice_data = dice_data.infer_objects()
continuous_features = [ features[idx] for idx in encoder.keys()]
dice_dataset = dice_ml.Data(dataframe=dice_data, outcome_name='eligible', continuous_features=continuous_features)
dice_model = dice_ml.Model(model=predict, backend="sklearn")
exp = dice_ml.Dice(dice_dataset, dice_model, method="random")
explanation = exp.generate_counterfactuals(pd.DataFrame(pd_test_x, columns=features), total_CFs=3, desired_class="opposite")
print(explanation)
#print(exp.visualize_as_dataframe(show_only_changes=True, display_sparse_df=False))
def define_command_line_options():
parser = argparse.ArgumentParser()
parser.add_argument('--model-factory', choices=["llama2_hf_70b",
"llama2_transformers", "vicuna", "vicuna15", "vicuna_vllm", "llama2_vllm", "gpt4_azure", "llama2"
], default="vicuna")
parser.add_argument('--dataset', type=str, default="A_2400.csv")
parser.add_argument('--predict-function', choices = ["predict_rules", "predict_rules_only"], default="predict_rules")
parser.add_argument('--system-template', type=str, default="system_template_6_conditions.txt")
parser.add_argument('--xai', default=[], choices=["shap_explainer", "fastshap_explainer",
"fastshap2_explainer", "morris_explainer", "counterfactual_explainer"], action='append')
parser.add_argument('--saveout', default=False, action='store_true')
parser.add_argument('--classification-report', default=False, action='store_true')
parser.add_argument('--fastshap-model-load', default=None, type=str)
parser.add_argument('--fastshap-model-save', default=None, type=str)
parser.add_argument('--drop-noise', default=False, action='store_true')
parser.add_argument('--check-rules', default=False, action='store_true')
parser.add_argument('--test-size', default=.2, type=float)
parser.add_argument('--optimize-temperature', default=False, action='store_true')
parser.add_argument('--throttle', default=0, type=int)
parser.add_argument('--stability', default=False, action='store_true')
parser.add_argument('--surrogate-model', default=None, type=str)
parser.add_argument('--ablation-study', default=False, action='store_true')
parser.add_argument('--confusion-study', default=False, action='store_true')
return parser
def read_command_line_options(parser : argparse.ArgumentParser) -> argparse.Namespace:
return parser.parse_args()
#this function does too much, split it later
def prepare_train_test_split(dataset, test_size=.2, drop_noise=False, random_state=42):
df = pd.read_csv(dataset)
encoding = defaultdict(LabelEncoder)
mask = (df.dtypes == object) | (df.dtypes == bool)
X = df.drop(columns=["eligible"], axis=1)
if drop_noise:
to_drop = [ col for col in X.columns if "noise" in col]
X = X.drop(columns=to_drop, axis=1)
X.loc[:, mask] = X.loc[:, mask].astype(str).apply(lambda s: encoding[X.columns.get_loc(s.name)].fit_transform(s))
X_columns = X.columns
#conversion to numpy array, because shap works with numpy arrays
X = X.to_numpy()
y = df["eligible"].to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state, stratify=y)
return X, y, X_train, y_train, X_test, y_test, X_columns, encoding
parser = define_command_line_options()
configuration = read_command_line_options(parser)
system_template = pathlib.Path(configuration.system_template).read_text()
system_prompt = SystemMessagePromptTemplate.from_template(system_template)
X, y, X_train, y_train, X_test, y_test, columns, encoding = prepare_train_test_split(configuration.dataset,
test_size=configuration.test_size,
drop_noise=configuration.drop_noise)
X_columns = ",".join([name for name in columns])
system_prompt = system_prompt.format(names=X_columns)
human_template = """{features}"""
human_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])
llm = globals()[configuration.model_factory]()
chain = LLMChain(llm=llm, prompt=chat_prompt)
predict = globals()[configuration.predict_function](chain, columns, encoding, configuration=configuration)
def create_llm_classifier(predict):
def _create_llm_classifier(cls):
from sklearn.base import BaseEstimator, ClassifierMixin
class LLMEstimatorSK(BaseEstimator, ClassifierMixin, cls):
def __init__(self, temperature=.7):
self.temperature = temperature
def fit(self, X, y):
return self
def predict(self, X):
return predict(X)
return LLMEstimatorSK
return _create_llm_classifier
@create_llm_classifier(predict=predict)
class LLMEstimatorSK:
def __init__(self, temperature=.7):
self.temperature = temperature
def fit(self, X, y):
return self
def predict(self, X):
return predict(X)
def indices_tmp():
indices = range(len(X_train))
_, _, _, _, indices_train, indices_test = train_test_split(X_train, y_train, indices, test_size=configuration.test_size,
random_state=42, stratify=y_train)
yield indices_train, indices_test
if configuration.optimize_temperature:
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(LLMEstimatorSK(), cv=indices_tmp(), param_grid={"temperature": [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1]})
grid.fit(X_train, y_train)
print(grid.best_params_)
scores = pd.DataFrame(grid.cv_results_)
scores.to_excel(f"grid_search_{configuration.model_factory}_04_06.xlsx")
#rules consistency check
if configuration.check_rules:
    rule_predict = predict_rules_only(X_train, columns, encoding)  # use the returned checker; analyze_rules is not visible at module scope
    for idx in range(len(X_train)):
        violated_per_row = []
        rule_predict([X_train[idx]], violated_per_row)
        violated = violated_per_row[0]
        if (len(violated) == 0 and y_train[idx] == False) or (len(violated) > 0 and y_train[idx] == True):
            print(f"rule violation for: {X_train[idx]}")
if configuration.classification_report:
y_pred = predict(X_test)
print(classification_report(y_test, y_pred))
if configuration.stability:
output = []
for _ in range(5):
predict(X_test, output=output)
import json
with open(f"stability_{configuration.model_factory}.json", "w") as output_file:
json.dump(output, output_file)
if configuration.surrogate_model:
from sklearn.tree import DecisionTreeClassifier
surrogate_model = DecisionTreeClassifier(random_state=42)
#get model answers
answers = pd.read_json(configuration.surrogate_model)
target = pd.DataFrame([1 if "granted" in text.lower() else 0 for text in answers["text"].tolist()])
#get indices of the test set
indices = range(len(X))
_, _, _, _, indices_train, indices_test = train_test_split(X, y, indices, test_size=configuration.test_size,
random_state=42, stratify=y)
print(len(target))
surrogate_model.fit(X_train, target.iloc[indices_train])
print(surrogate_model.feature_importances_)
for xai in configuration.xai:
globals()[xai](X_train, X_test, y_train, y_test, surrogate_model.predict, columns, encoding, configuration)
for xai in configuration.xai:
globals()[xai](X_train, X_test, y_train, y_test, predict, columns, encoding, configuration)
if configuration.ablation_study:
import json
output=[]
y_pred=predict(X_test, output=output)
with open(f"ablation_{configuration.system_template}", "w") as classification_file, \
open(f"ablation_{configuration.system_template}.json", "w") as json_file:
classification_file.write(classification_report(y_test, y_pred))
json.dump(output, json_file)
if configuration.confusion_study:
# ---tmp----
class Status:
def __init__(self):
            self.first_rule_violated = False
            self.second_rule_violated = False
            self.y_true = None
self.y_pred = None
self.features = None
self.answer = None
def __str__(self):
return f"first_rule_violated: {self.first_rule_violated}, second_rule_violated: {self.second_rule_violated}, y_true: {self.y_true}, y_pred: {self.y_pred}, features: {self.features}, gpt4: {self.answer}"
def __repr__(self):
return self.__str__()
def __hash__(self):
return hash((self.first_rule_violated, self.second_rule_violated, self.y_true,
self.y_pred))
def __eq__(self, other):
return (self.first_rule_violated, self.second_rule_violated, self.y_true,
self.y_pred) == (other.first_rule_violated, other.second_rule_violated, other.y_true,
other.y_pred)
#
#additional modules
statuses = set()
y_=[1 if yy else 0 for yy in y_test]
analyze_rules=predict_rules_simplified_only(X_test, columns, encoding)
violations=[]
analyze_rules(X_test, violations)
x_test_set = []
y_test_set = []
for rule, violation, y_true in zip(X_test, violations, y_):
if len(statuses) == 8:
break
status = Status()
status.y_true = y_true
output=[]
status.y_pred = predict([rule], output=output)[0]
status.features = rule
status.first_rule_violated = 1 in violation
status.second_rule_violated = 2 in violation
status.answer = output[0]
if status not in statuses:
statuses.add(status)
x_test_set += [rule]
            y_test_set += [y_true]
print(status)
print("----- Found rule ----")
with open("confusion_study.txt", "a") as file:
file.write(str(status))
file.write("\n")
print("-----------------")
print(statuses)
print("---performing xai----")
shap_explainer(X_train, np.array(x_test_set), y_train, y_test_set, predict, columns, encoding, configuration)
| [
"langchain.chains.LLMChain",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.chat_models.AzureChatOpenAI",
"langchain.llms.HuggingFacePipeline",
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.prompts.chat.ChatPromptTemplate.from_messages"
] | [((17189, 17247), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['system_template'], {}), '(system_template)\n', (17230, 17247), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((17759, 17815), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (17799, 17815), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((17833, 17896), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_prompt, human_prompt]'], {}), '([system_prompt, human_prompt])\n', (17865, 17896), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((17960, 17997), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'chat_prompt'}), '(llm=llm, prompt=chat_prompt)\n', (17968, 17997), False, 'from langchain.chains import LLMChain\n'), ((1366, 1527), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model', 'openai_api_key': '"""EMPTY"""', 'openai_api_base': '"""http://localhost:8000/v1"""', 'max_tokens': '(150)', 'verbose': '(True)', 'temperature': 'temperature'}), "(model_name=model, openai_api_key='EMPTY', openai_api_base=\n 'http://localhost:8000/v1', max_tokens=150, verbose=True, temperature=\n temperature)\n", (1376, 1527), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1598, 1740), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model', 'openai_api_key': '"""EMPTY"""', 'openai_api_base': '"""http://localhost:8000/v1"""', 'max_tokens': '(150)', 'temperature': 'temperature'}), "(model_name=model, openai_api_key='EMPTY', openai_api_base=\n 'http://localhost:8000/v1', max_tokens=150, temperature=temperature)\n", (1608, 1740), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1978, 2136), 'transformers.BitsAndBytesConfig', 'transformers.BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_use_double_quant': '(True)', 'bnb_4bit_compute_dtype': 'torch.bfloat16'}), "(load_in_4bit=True, bnb_4bit_quant_type=\n 'nf4', bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.\n bfloat16)\n", (2009, 2136), False, 'import transformers\n'), ((2352, 2401), 'transformers.AutoConfig.from_pretrained', 'transformers.AutoConfig.from_pretrained', (['model_id'], {}), '(model_id)\n', (2391, 2401), False, 'import transformers\n'), ((2417, 2553), 'transformers.AutoModelForCausalLM.from_pretrained', 'transformers.AutoModelForCausalLM.from_pretrained', (['model_id'], {'config': 'model_config', 'quantization_config': 'bnb_config', 'device_map': '"""auto"""'}), "(model_id, config=\n model_config, quantization_config=bnb_config, device_map='auto')\n", (2466, 2553), False, 'import transformers\n'), ((2605, 2657), 'transformers.AutoTokenizer.from_pretrained', 'transformers.AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (2647, 2657), False, 'import transformers\n'), ((2681, 2838), 'transformers.pipeline', 'transformers.pipeline', ([], {'model': 'model', 'tokenizer': 'tokenizer', 'task': '"""text-generation"""', 'temperature': 'temperature', 'max_new_tokens': '(150)', 'repetition_penalty': '(1.1)'}), "(model=model, tokenizer=tokenizer, task=\n 
'text-generation', temperature=temperature, max_new_tokens=150,\n repetition_penalty=1.1)\n", (2702, 2838), False, 'import transformers\n'), ((2886, 2929), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'generate_text'}), '(pipeline=generate_text)\n', (2905, 2929), False, 'from langchain.llms import HuggingFacePipeline\n'), ((3144, 3446), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'openai_api_base': "credentials['OPENAI_API_BASE']", 'openai_api_version': "credentials['OPENAI_API_VERSION']", 'deployment_name': '"""test-gpt4-32k"""', 'openai_api_key': "credentials['OPENAI_API_KEY']", 'openai_api_type': "credentials['OPENAI_API_TYPE']", 'max_tokens': '(150)', 'temperature': 'temperature'}), "(openai_api_base=credentials['OPENAI_API_BASE'],\n openai_api_version=credentials['OPENAI_API_VERSION'], deployment_name=\n 'test-gpt4-32k', openai_api_key=credentials['OPENAI_API_KEY'],\n openai_api_type=credentials['OPENAI_API_TYPE'], max_tokens=150,\n temperature=temperature)\n", (3159, 3446), False, 'from langchain.chat_models import AzureChatOpenAI\n'), ((12092, 12184), 'interpret.blackbox.MorrisSensitivity', 'MorrisSensitivity', (['predict', 'test'], {'feature_names': 'features', 'num_resamples': '(10)', 'num_levels': '(2)'}), '(predict, test, feature_names=features, num_resamples=10,\n num_levels=2)\n', (12109, 12184), False, 'from interpret.blackbox import MorrisSensitivity\n'), ((13240, 13277), 'pandas.DataFrame', 'pd.DataFrame', (['train'], {'columns': 'features'}), '(train, columns=features)\n', (13252, 13277), True, 'import pandas as pd\n'), ((13296, 13339), 'pandas.DataFrame', 'pd.DataFrame', (['y_train'], {'columns': "['eligible']"}), "(y_train, columns=['eligible'])\n", (13308, 13339), True, 'import pandas as pd\n'), ((13357, 13393), 'pandas.DataFrame', 'pd.DataFrame', (['test'], {'columns': 'features'}), '(test, columns=features)\n', (13369, 13393), True, 'import pandas as pd\n'), ((13411, 13453), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {'columns': "['eligible']"}), "(y_test, columns=['eligible'])\n", (13423, 13453), True, 'import pandas as pd\n'), ((13471, 13528), 'pandas.concat', 'pd.concat', (['[pd_train_x, pd_train_y]'], {'axis': '(1)', 'join': '"""inner"""'}), "([pd_train_x, pd_train_y], axis=1, join='inner')\n", (13480, 13528), True, 'import pandas as pd\n'), ((13664, 13767), 'dice_ml.Data', 'dice_ml.Data', ([], {'dataframe': 'dice_data', 'outcome_name': '"""eligible"""', 'continuous_features': 'continuous_features'}), "(dataframe=dice_data, outcome_name='eligible',\n continuous_features=continuous_features)\n", (13676, 13767), False, 'import dice_ml\n'), ((13782, 13829), 'dice_ml.Model', 'dice_ml.Model', ([], {'model': 'predict', 'backend': '"""sklearn"""'}), "(model=predict, backend='sklearn')\n", (13795, 13829), False, 'import dice_ml\n'), ((13841, 13896), 'dice_ml.Dice', 'dice_ml.Dice', (['dice_dataset', 'dice_model'], {'method': '"""random"""'}), "(dice_dataset, dice_model, method='random')\n", (13853, 13896), False, 'import dice_ml\n'), ((14192, 14217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14215, 14217), False, 'import argparse\n'), ((16230, 16250), 'pandas.read_csv', 'pd.read_csv', (['dataset'], {}), '(dataset)\n', (16241, 16250), True, 'import pandas as pd\n'), ((16267, 16292), 'collections.defaultdict', 'defaultdict', (['LabelEncoder'], {}), '(LabelEncoder)\n', (16278, 16292), False, 'from collections import defaultdict\n'), ((16839, 16925), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state', 'stratify': 'y'}), '(X, y, test_size=test_size, random_state=random_state,\n stratify=y)\n', (16855, 16925), False, 'from sklearn.model_selection import train_test_split\n'), ((18980, 19098), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train', 'indices'], {'test_size': 'configuration.test_size', 'random_state': '(42)', 'stratify': 'y_train'}), '(X_train, y_train, indices, test_size=configuration.\n test_size, random_state=42, stratify=y_train)\n', (18996, 19098), False, 'from sklearn.model_selection import train_test_split\n'), ((19523, 19553), 'pandas.DataFrame', 'pd.DataFrame', (['grid.cv_results_'], {}), '(grid.cv_results_)\n', (19535, 19553), True, 'import pandas as pd\n'), ((20523, 20562), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (20545, 20562), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((20602, 20645), 'pandas.read_json', 'pd.read_json', (['configuration.surrogate_model'], {}), '(configuration.surrogate_model)\n', (20614, 20645), True, 'import pandas as pd\n'), ((20861, 20960), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y', 'indices'], {'test_size': 'configuration.test_size', 'random_state': '(42)', 'stratify': 'y'}), '(X, y, indices, test_size=configuration.test_size,\n random_state=42, stratify=y)\n', (20877, 20960), False, 'from sklearn.model_selection import train_test_split\n'), ((3103, 3130), 'json.load', 'json.load', (['credentials_file'], {}), '(credentials_file)\n', (3112, 3130), False, 'import json\n'), ((7289, 7305), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (7297, 7305), True, 'import numpy as np\n'), ((8861, 8877), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (8869, 8877), True, 'import numpy as np\n'), ((9327, 9352), 'numpy.array', 'np.array', (['X'], {'dtype': 'object'}), '(X, dtype=object)\n', (9335, 9352), True, 'import numpy as np\n'), ((10606, 10623), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (10614, 10623), True, 'import numpy as np\n'), ((11681, 11751), 'numpy.save', 'np.save', (['f"""shap_values_{configuration.model_factory}.npy"""', 'shap_values'], {}), "(f'shap_values_{configuration.model_factory}.npy', shap_values)\n", (11688, 11751), True, 'import numpy as np\n'), ((13945, 13986), 'pandas.DataFrame', 'pd.DataFrame', (['pd_test_x'], {'columns': 'features'}), '(pd_test_x, columns=features)\n', (13957, 13986), True, 'import pandas as pd\n'), ((17116, 17159), 'pathlib.Path', 'pathlib.Path', (['configuration.system_template'], {}), '(configuration.system_template)\n', (17128, 17159), False, 'import pathlib\n'), ((20115, 20152), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (20136, 20152), False, 'from sklearn.metrics import classification_report\n'), ((20377, 20407), 'json.dump', 'json.dump', (['output', 'output_file'], {}), '(output, output_file)\n', (20386, 20407), False, 'import json\n'), ((21830, 21858), 'json.dump', 'json.dump', (['output', 'json_file'], {}), '(output, json_file)\n', (21839, 21858), False, 'import json\n'), ((24095, 24115), 'numpy.array', 'np.array', (['x_test_set'], {}), '(x_test_set)\n', (24103, 24115), True, 'import numpy as np\n'), ((9135, 9147), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (9145, 9147), False, 
'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((9155, 9160), 'progressbar.Bar', 'Bar', ([], {}), '()\n', (9158, 9160), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((9168, 9175), 'progressbar.Timer', 'Timer', ([], {}), '()\n', (9173, 9175), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((9183, 9188), 'progressbar.ETA', 'ETA', ([], {}), '()\n', (9186, 9188), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((9195, 9204), 'progressbar.Counter', 'Counter', ([], {}), '()\n', (9202, 9204), False, 'from progressbar import ProgressBar, Percentage, Bar, Timer, ETA, Counter\n'), ((11430, 11453), 'shap.sample', 'shap.sample', (['train', '(100)'], {}), '(train, 100)\n', (11441, 11453), False, 'import shap\n'), ((13082, 13115), 'pickle.dump', 'pickle.dump', (['explanations', 'output'], {}), '(explanations, output)\n', (13093, 13115), False, 'import pickle\n'), ((21782, 21819), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (21803, 21819), False, 'from sklearn.metrics import classification_report\n'), ((10532, 10566), 'time.sleep', 'time.sleep', (['configuration.throttle'], {}), '(configuration.throttle)\n', (10542, 10566), False, 'import time\n')] |
import os
from transformers import AutoTokenizer
from configs import (
EMBEDDING_MODEL,
KB_ROOT_PATH,
CHUNK_SIZE,
OVERLAP_SIZE,
ZH_TITLE_ENHANCE,
logger,
log_verbose,
text_splitter_dict,
LLM_MODEL,
TEXT_SPLITTER_NAME,
)
import importlib
from text_splitter import zh_title_enhance as func_zh_title_enhance
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from pathlib import Path
import json
from concurrent.futures import ThreadPoolExecutor
from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config
import io
from typing import List, Union, Callable, Dict, Optional, Tuple, Generator
import chardet
def validate_kb_name(knowledge_base_id: str) -> bool:
    # Check for unexpected characters or path-traversal attack keywords
if "../" in knowledge_base_id:
return False
return True
def get_kb_path(knowledge_base_name: str):
return os.path.join(KB_ROOT_PATH, knowledge_base_name)
def get_doc_path(knowledge_base_name: str):
return os.path.join(get_kb_path(knowledge_base_name), "content")
def get_vs_path(knowledge_base_name: str, vector_name: str):
return os.path.join(get_kb_path(knowledge_base_name), vector_name)
def get_file_path(knowledge_base_name: str, doc_name: str):
return os.path.join(get_doc_path(knowledge_base_name), doc_name)
def list_kbs_from_folder():
return [f for f in os.listdir(KB_ROOT_PATH)
if os.path.isdir(os.path.join(KB_ROOT_PATH, f))]
def list_files_from_folder(kb_name: str):
doc_path = get_doc_path(kb_name)
return [file for file in os.listdir(doc_path)
if os.path.isfile(os.path.join(doc_path, file))]
def load_embeddings(model: str = EMBEDDING_MODEL, device: str = embedding_device()):
'''
    Load embeddings from the cache, which avoids concurrent-loading races across threads.
'''
from server.knowledge_base.kb_cache.base import embeddings_pool
return embeddings_pool.load_embeddings(model=model, device=device)
LOADER_DICT = {"UnstructuredHTMLLoader": ['.html'],
"UnstructuredMarkdownLoader": ['.md'],
"CustomJSONLoader": [".json"],
"CSVLoader": [".csv"],
"RapidOCRPDFLoader": [".pdf"],
"RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'],
"UnstructuredFileLoader": ['.eml', '.msg', '.rst',
'.rtf', '.txt', '.xml',
'.docx', '.epub', '.odt',
'.ppt', '.pptx', '.tsv'],
}
SUPPORTED_EXTS = [ext for sublist in LOADER_DICT.values() for ext in sublist]
class CustomJSONLoader(langchain.document_loaders.JSONLoader):
'''
    langchain's JSONLoader requires jq, which is inconvenient on Windows, so this class replaces it. Targets langchain==0.0.286.
'''
def __init__(
self,
file_path: Union[str, Path],
content_key: Optional[str] = None,
metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None,
text_content: bool = True,
json_lines: bool = False,
):
"""Initialize the JSONLoader.
Args:
file_path (Union[str, Path]): The path to the JSON or JSON Lines file.
content_key (str): The key to use to extract the content from the JSON if
results to a list of objects (dict).
metadata_func (Callable[Dict, Dict]): A function that takes in the JSON
object extracted by the jq_schema and the default metadata and returns
a dict of the updated metadata.
text_content (bool): Boolean flag to indicate whether the content is in
string format, default to True.
json_lines (bool): Boolean flag to indicate whether the input is in
JSON Lines format.
"""
self.file_path = Path(file_path).resolve()
self._content_key = content_key
self._metadata_func = metadata_func
self._text_content = text_content
self._json_lines = json_lines
def _parse(self, content: str, docs: List[Document]) -> None:
"""Convert given content to documents."""
data = json.loads(content)
# Perform some validation
# This is not a perfect validation, but it should catch most cases
# and prevent the user from getting a cryptic error later on.
if self._content_key is not None:
self._validate_content_key(data)
if self._metadata_func is not None:
self._validate_metadata_func(data)
for i, sample in enumerate(data, len(docs) + 1):
text = self._get_text(sample=sample)
metadata = self._get_metadata(
sample=sample, source=str(self.file_path), seq_num=i
)
docs.append(Document(page_content=text, metadata=metadata))
langchain.document_loaders.CustomJSONLoader = CustomJSONLoader
def get_LoaderClass(file_extension):
for LoaderClass, extensions in LOADER_DICT.items():
if file_extension in extensions:
return LoaderClass
# Pull some shared vectorization logic out of KnowledgeFile; once langchain supports in-memory files, non-disk content can be vectorized as well.
def get_loader(loader_name: str, file_path_or_content: Union[str, bytes, io.StringIO, io.BytesIO]):
'''
    Return a document loader for the given loader_name and file path or content.
'''
try:
if loader_name in ["RapidOCRPDFLoader", "RapidOCRLoader"]:
document_loaders_module = importlib.import_module('document_loaders')
else:
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, loader_name)
except Exception as e:
        msg = f"Error while looking up loader {loader_name} for file {file_path_or_content}: {e}"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, "UnstructuredFileLoader")
if loader_name == "UnstructuredFileLoader":
loader = DocumentLoader(file_path_or_content, autodetect_encoding=True)
elif loader_name == "CSVLoader":
        # Auto-detect the file encoding so the langchain loader does not fail with encoding errors
with open(file_path_or_content, 'rb') as struct_file:
encode_detect = chardet.detect(struct_file.read())
if encode_detect:
loader = DocumentLoader(file_path_or_content, encoding=encode_detect["encoding"])
else:
loader = DocumentLoader(file_path_or_content, encoding="utf-8")
elif loader_name == "JSONLoader":
loader = DocumentLoader(file_path_or_content, jq_schema=".", text_content=False)
elif loader_name == "CustomJSONLoader":
loader = DocumentLoader(file_path_or_content, text_content=False)
elif loader_name == "UnstructuredMarkdownLoader":
loader = DocumentLoader(file_path_or_content, mode="elements")
elif loader_name == "UnstructuredHTMLLoader":
loader = DocumentLoader(file_path_or_content, mode="elements")
else:
loader = DocumentLoader(file_path_or_content)
return loader
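# Minimal usage sketch (added illustration; "example.json" is a hypothetical path): resolve the
# loader class registered for an extension and load the file through it.
def _example_get_loader():
    loader_class = get_LoaderClass(".json")  # -> "CustomJSONLoader"
    loader = get_loader(loader_class, "example.json")
    return loader.load()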
def make_text_splitter(
splitter_name: str = TEXT_SPLITTER_NAME,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
llm_model: str = LLM_MODEL,
):
"""
    Build the specific text splitter selected by the parameters.
"""
splitter_name = splitter_name or "SpacyTextSplitter"
try:
        if splitter_name == "MarkdownHeaderTextSplitter":  # special-case handling for MarkdownHeaderTextSplitter
headers_to_split_on = text_splitter_dict[splitter_name]['headers_to_split_on']
text_splitter = langchain.text_splitter.MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on)
else:
            try: ## prefer a user-defined text_splitter
text_splitter_module = importlib.import_module('text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
            except: ## otherwise fall back to langchain's text_splitter
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
            if text_splitter_dict[splitter_name]["source"] == "tiktoken": ## load from tiktoken
try:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
            elif text_splitter_dict[splitter_name]["source"] == "huggingface": ## load from huggingface
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "":
config = get_model_worker_config(llm_model)
text_splitter_dict[splitter_name]["tokenizer_name_or_path"] = \
config.get("model_path")
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "gpt2":
from transformers import GPT2TokenizerFast
from langchain.text_splitter import CharacterTextSplitter
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
                else: ## load by character length
tokenizer = AutoTokenizer.from_pretrained(
text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
trust_remote_code=True)
text_splitter = TextSplitter.from_huggingface_tokenizer(
tokenizer=tokenizer,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
else:
try:
text_splitter = TextSplitter(
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except Exception as e:
print(e)
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter")
text_splitter = TextSplitter(chunk_size=250, chunk_overlap=50)
return text_splitter
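# Minimal usage sketch (added illustration; the concrete splitter class depends on
# TEXT_SPLITTER_NAME in configs): build a splitter and split a short sample string.
def _example_make_text_splitter():
    splitter = make_text_splitter(chunk_size=250, chunk_overlap=50)
    return splitter.split_text("LangChain can split long documents into overlapping chunks for retrieval.")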
class KnowledgeFile:
def __init__(
self,
filename: str,
knowledge_base_name: str
):
'''
        Corresponds to a file in the knowledge base directory; it must exist on disk before vectorization and related operations can run.
'''
self.kb_name = knowledge_base_name
self.filename = filename
self.ext = os.path.splitext(filename)[-1].lower()
if self.ext not in SUPPORTED_EXTS:
            raise ValueError(f"Unsupported file format {self.ext}")
self.filepath = get_file_path(knowledge_base_name, filename)
self.docs = None
self.splited_docs = None
self.document_loader_name = get_LoaderClass(self.ext)
self.text_splitter_name = TEXT_SPLITTER_NAME
def file2docs(self, refresh: bool=False):
if self.docs is None or refresh:
logger.info(f"{self.document_loader_name} used for {self.filepath}")
loader = get_loader(self.document_loader_name, self.filepath)
self.docs = loader.load()
return self.docs
def docs2texts(
self,
docs: List[Document] = None,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
docs = docs or self.file2docs(refresh=refresh)
if not docs:
return []
if self.ext not in [".csv"]:
if text_splitter is None:
text_splitter = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
if self.text_splitter_name == "MarkdownHeaderTextSplitter":
docs = text_splitter.split_text(docs[0].page_content)
for doc in docs:
                    # if the document carries metadata
if doc.metadata:
doc.metadata["source"] = os.path.basename(self.filepath)
else:
docs = text_splitter.split_documents(docs)
        print(f"Document chunking example: {docs[0]}")
if zh_title_enhance:
docs = func_zh_title_enhance(docs)
self.splited_docs = docs
return self.splited_docs
def file2text(
self,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
if self.splited_docs is None or refresh:
docs = self.file2docs()
self.splited_docs = self.docs2texts(docs=docs,
zh_title_enhance=zh_title_enhance,
refresh=refresh,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
text_splitter=text_splitter)
return self.splited_docs
def file_exist(self):
return os.path.isfile(self.filepath)
def get_mtime(self):
return os.path.getmtime(self.filepath)
def get_size(self):
return os.path.getsize(self.filepath)
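# Illustrative lifecycle sketch (added example): load a file that already exists in the "samples"
# knowledge base and split it into chunks; this mirrors the __main__ block at the bottom of the module.
def _example_knowledge_file_usage():
    kf = KnowledgeFile(filename="test.txt", knowledge_base_name="samples")
    if not kf.file_exist():
        return []
    return kf.file2text(chunk_size=CHUNK_SIZE, chunk_overlap=OVERLAP_SIZE)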
def files2docs_in_thread(
files: List[Union[KnowledgeFile, Tuple[str, str], Dict]],
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
pool: ThreadPoolExecutor = None,
) -> Generator:
'''
    Convert files on disk into langchain Documents in batches using multiple threads.
    If an item is passed as a Tuple, it takes the form (filename, kb_name).
    The generator yields status, (kb_name, file_name, docs | error).
'''
def file2docs(*, file: KnowledgeFile, **kwargs) -> Tuple[bool, Tuple[str, str, List[Document]]]:
try:
return True, (file.kb_name, file.filename, file.file2text(**kwargs))
except Exception as e:
            msg = f"Error while loading documents from file {file.kb_name}/{file.filename}: {e}"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
return False, (file.kb_name, file.filename, msg)
kwargs_list = []
for i, file in enumerate(files):
kwargs = {}
try:
if isinstance(file, tuple) and len(file) >= 2:
filename=file[0]
kb_name=file[1]
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
elif isinstance(file, dict):
filename = file.pop("filename")
kb_name = file.pop("kb_name")
kwargs.update(file)
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
kwargs["file"] = file
kwargs["chunk_size"] = chunk_size
kwargs["chunk_overlap"] = chunk_overlap
kwargs["zh_title_enhance"] = zh_title_enhance
kwargs_list.append(kwargs)
except Exception as e:
yield False, (kb_name, filename, str(e))
for result in run_in_thread_pool(func=file2docs, params=kwargs_list, pool=pool):
yield result
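# Minimal consumption sketch (added example): drive the generator for a single (filename, kb_name)
# tuple and collect the produced documents, logging any per-file error message.
def _example_files2docs():
    collected = {}
    for ok, (kb_name, file_name, docs_or_error) in files2docs_in_thread([("test.txt", "samples")]):
        if ok:
            collected[(kb_name, file_name)] = docs_or_error
        else:
            logger.error(docs_or_error)
    return collected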
if __name__ == "__main__":
from pprint import pprint
kb_file = KnowledgeFile(filename="test.txt", knowledge_base_name="samples")
# kb_file.text_splitter_name = "RecursiveCharacterTextSplitter"
docs = kb_file.file2docs()
pprint(docs[-1])
docs = kb_file.file2text()
pprint(docs[-1])
| [
"langchain.docstore.document.Document",
"langchain.text_splitter.TextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TextSplitter",
"langchain.text_splitter.TextSplitter.from_huggingface_tokenizer"
] | [((964, 1011), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'knowledge_base_name'], {}), '(KB_ROOT_PATH, knowledge_base_name)\n', (976, 1011), False, 'import os\n'), ((1789, 1807), 'server.utils.embedding_device', 'embedding_device', ([], {}), '()\n', (1805, 1807), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((1940, 1999), 'server.knowledge_base.kb_cache.base.embeddings_pool.load_embeddings', 'embeddings_pool.load_embeddings', ([], {'model': 'model', 'device': 'device'}), '(model=model, device=device)\n', (1971, 1999), False, 'from server.knowledge_base.kb_cache.base import embeddings_pool\n'), ((15816, 15881), 'server.utils.run_in_thread_pool', 'run_in_thread_pool', ([], {'func': 'file2docs', 'params': 'kwargs_list', 'pool': 'pool'}), '(func=file2docs, params=kwargs_list, pool=pool)\n', (15834, 15881), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((16147, 16163), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16153, 16163), False, 'from pprint import pprint\n'), ((16200, 16216), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16206, 16216), False, 'from pprint import pprint\n'), ((4174, 4193), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (4184, 4193), False, 'import json\n'), ((13844, 13873), 'os.path.isfile', 'os.path.isfile', (['self.filepath'], {}), '(self.filepath)\n', (13858, 13873), False, 'import os\n'), ((13915, 13946), 'os.path.getmtime', 'os.path.getmtime', (['self.filepath'], {}), '(self.filepath)\n', (13931, 13946), False, 'import os\n'), ((13987, 14017), 'os.path.getsize', 'os.path.getsize', (['self.filepath'], {}), '(self.filepath)\n', (14002, 14017), False, 'import os\n'), ((1445, 1469), 'os.listdir', 'os.listdir', (['KB_ROOT_PATH'], {}), '(KB_ROOT_PATH)\n', (1455, 1469), False, 'import os\n'), ((1641, 1661), 'os.listdir', 'os.listdir', (['doc_path'], {}), '(doc_path)\n', (1651, 1661), False, 'import os\n'), ((5418, 5461), 'importlib.import_module', 'importlib.import_module', (['"""document_loaders"""'], {}), "('document_loaders')\n", (5441, 5461), False, 'import importlib\n'), ((5514, 5567), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5537, 5567), False, 'import importlib\n'), ((5742, 5829), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (5754, 5829), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((5881, 5934), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5904, 5934), False, 'import importlib\n'), ((10617, 10667), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (10640, 10667), False, 'import importlib\n'), ((10779, 10825), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': '(250)', 'chunk_overlap': '(50)'}), '(chunk_size=250, chunk_overlap=50)\n', (10791, 10825), False, 'from langchain.text_splitter import TextSplitter\n'), ((11618, 11686), 'configs.logger.info', 'logger.info', (['f"""{self.document_loader_name} used for 
{self.filepath}"""'], {}), "(f'{self.document_loader_name} used for {self.filepath}')\n", (11629, 11686), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((12910, 12937), 'text_splitter.zh_title_enhance', 'func_zh_title_enhance', (['docs'], {}), '(docs)\n', (12931, 12937), True, 'from text_splitter import zh_title_enhance as func_zh_title_enhance\n'), ((1499, 1528), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'f'], {}), '(KB_ROOT_PATH, f)\n', (1511, 1528), False, 'import os\n'), ((1692, 1720), 'os.path.join', 'os.path.join', (['doc_path', 'file'], {}), '(doc_path, file)\n', (1704, 1720), False, 'import os\n'), ((3852, 3867), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (3856, 3867), False, 'from pathlib import Path\n'), ((4809, 4855), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (4817, 4855), False, 'from langchain.docstore.document import Document\n'), ((7841, 7881), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (7864, 7881), False, 'import importlib\n'), ((14757, 14844), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (14769, 14844), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((8049, 8099), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (8072, 8099), False, 'import importlib\n'), ((8324, 8521), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], pipeline='zh_core_web_sm',\n chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n", (8358, 8521), False, 'from langchain.text_splitter import TextSplitter\n'), ((9895, 10012), 'langchain.text_splitter.TextSplitter.from_huggingface_tokenizer', 'TextSplitter.from_huggingface_tokenizer', ([], {'tokenizer': 'tokenizer', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(tokenizer=tokenizer, chunk_size=\n chunk_size, chunk_overlap=chunk_overlap)\n', (9934, 10012), False, 'from langchain.text_splitter import TextSplitter\n'), ((11140, 11166), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (11156, 11166), False, 'import os\n'), ((8691, 8861), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (8725, 8861), False, 'from langchain.text_splitter import TextSplitter\n'), ((9160, 9194), 'server.utils.get_model_worker_config', 'get_model_worker_config', (['llm_model'], {}), 
'(llm_model)\n', (9183, 9194), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((9592, 9633), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9625, 9633), False, 'from transformers import GPT2TokenizerFast\n'), ((9699, 9818), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["text_splitter_dict[splitter_name]['tokenizer_name_or_path']"], {'trust_remote_code': '(True)'}), "(text_splitter_dict[splitter_name][\n 'tokenizer_name_or_path'], trust_remote_code=True)\n", (9728, 9818), False, 'from transformers import AutoTokenizer\n'), ((10161, 10256), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(pipeline='zh_core_web_sm', chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (10173, 10256), False, 'from langchain.text_splitter import TextSplitter\n'), ((12717, 12748), 'os.path.basename', 'os.path.basename', (['self.filepath'], {}), '(self.filepath)\n', (12733, 12748), False, 'import os\n'), ((10407, 10471), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (10419, 10471), False, 'from langchain.text_splitter import TextSplitter\n')] |
"""Create a LangChain chain for question/answering."""
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ConversationalRetrievalChain, RetrievalQAWithSourcesChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores import Vectara
from langchain.memory import ConversationBufferMemory
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from langchain.schema import StrOutputParser
from operator import itemgetter
import os
#
def format_docs(docs) -> str:
return "\n\n".join(doc.page_content for doc in docs)
def get_chain(
vectorstore: VectorStore, question_handler, stream_handler
) -> RunnableParallel:
"""Create a chain for question/answering."""
load_dotenv()
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
hf_llm = HuggingFaceEndpoint(
endpoint_url="https://euo6lqs9bqkddhci.us-east-1.aws.endpoints.huggingface.cloud",
huggingfacehub_api_token=os.environ["HF_TOKEN"],
task="text-generation",
model_kwargs={
"temperature": 0.1,
"max_new_tokens": 488,
},
)
retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
prompt_template = """\
Use the provided context to answer the user's question. If you don't know the answer, say you don't know.
Context:
{context}
Question:
{question}
Answer in french and do not start with 'Réponse:'
"""
rag_prompt = ChatPromptTemplate.from_template(prompt_template)
entry_point_chain = {
"context": lambda input: format_docs(input["documents"]),
"question": itemgetter("question"),
}
rag_chain = entry_point_chain | rag_prompt | hf_llm | StrOutputParser()
rag_chain_with_sources = RunnableParallel(
{"documents": retriever, "question": RunnablePassthrough()}
) | {
"documents": lambda input: [doc.metadata for doc in input["documents"]],
"answer": rag_chain,
}
return rag_chain_with_sources
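# A minimal usage sketch, assuming `my_vectorstore` is an already-populated
# VectorStore and the HuggingFace endpoint credentials above are valid. The
# chain takes the raw question string as input (both the retriever and the
# RunnablePassthrough receive it):
#
#   qa_chain = get_chain(my_vectorstore, question_handler=None, stream_handler=None)
#   response = qa_chain.invoke("Quelle est la politique de retour ?")
#   response["answer"]     # the generated answer, in French
#   response["documents"]  # metadata of the retrieved source documents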
| [
"langchain.prompts.ChatPromptTemplate.from_template",
"langchain.callbacks.manager.AsyncCallbackManager",
"langchain.schema.StrOutputParser",
"langchain_core.runnables.RunnablePassthrough",
"langchain.llms.huggingface_endpoint.HuggingFaceEndpoint"
] | [((1270, 1283), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1281, 1283), False, 'from dotenv import load_dotenv\n'), ((1298, 1322), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (1318, 1322), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1346, 1386), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (1366, 1386), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1408, 1446), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (1428, 1446), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1461, 1707), 'langchain.llms.huggingface_endpoint.HuggingFaceEndpoint', 'HuggingFaceEndpoint', ([], {'endpoint_url': '"""https://euo6lqs9bqkddhci.us-east-1.aws.endpoints.huggingface.cloud"""', 'huggingfacehub_api_token': "os.environ['HF_TOKEN']", 'task': '"""text-generation"""', 'model_kwargs': "{'temperature': 0.1, 'max_new_tokens': 488}"}), "(endpoint_url=\n 'https://euo6lqs9bqkddhci.us-east-1.aws.endpoints.huggingface.cloud',\n huggingfacehub_api_token=os.environ['HF_TOKEN'], task='text-generation',\n model_kwargs={'temperature': 0.1, 'max_new_tokens': 488})\n", (1480, 1707), False, 'from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint\n'), ((2116, 2165), 'langchain.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2148, 2165), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((2279, 2301), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (2289, 2301), False, 'from operator import itemgetter\n'), ((2367, 2384), 'langchain.schema.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2382, 2384), False, 'from langchain.schema import StrOutputParser\n'), ((2478, 2499), 'langchain_core.runnables.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (2497, 2499), False, 'from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n')] |
from Google import Create_Service
import gspread
import langchain
from langchain.chat_models import ChatOpenAI
import pymysql
from langchain.document_loaders.csv_loader import UnstructuredCSVLoader
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain import PromptTemplate, LLMChain
import os
import csv
from twilio.rest import Client
from dotenv import load_dotenv
pymysql.install_as_MySQLdb()
load_dotenv()
OPENAI_API_TOKEN=os.getenv("OPENAI_API_TOKEN")
os.environ["OPENAI_API_TOKEN"] = OPENAI_API_TOKEN
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="./credentials.json"
# Your Account SID from twilio.com/console
account_sid = os.environ["TWILIO_ACCOUNT_SID"]
# Your Auth Token from twilio.com/console
auth_token = os.environ["TWILIO_AUTH_TOKEN"]
client = Client(account_sid, auth_token)
chat_llm=ChatOpenAI(model="gpt-4", openai_api_key=OPENAI_API_TOKEN)
#connect to the database
connection = pymysql.connect(host=os.environ["DB_HOST"],
user=os.environ["DB_USERNAME"],
password=os.environ["DB_PASSWORD"],
database= os.environ["DATABASE"],
ssl_ca='./cert.pem' # From AWS's cert.pem
)
# use the service account credentials JSON file to instantiate the gspread client
Service_Account=gspread.service_account("credentials.json")
cursor=connection.cursor()
#get all clients
query = """
SELECT pers.phone, pers.fname, pr.programName
FROM Client c
JOIN Profile pr ON c.profileId = pr.id
JOIN Personal pers ON c.personalId = pers.id
WHERE NOT EXISTS (
SELECT *
FROM Note n
WHERE c.id = n.clientId AND n.note = 'Cancelled'
);
"""
cursor.execute(query)
# list of tuples where index 0 is the phone number, index 1 is the first name, index 2 is the program name
client_programs = cursor.fetchall()
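# Hypothetical example of the shape of that result (values are made up):
#   client_programs == [("+15551234567", "Alex", "Spring Shred Program"), ...]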
def fetch_user(pr_name, first_name):
spread=Service_Account.open(pr_name)
#retrieve client diary using their program name
user_info=spread.worksheet("Goals/Info")
#create a file to store the contents of their goals
with open("/tmp/context.csv", "w", newline='') as f:
writer=csv.writer(f)
writer.writerows(user_info.get_all_values())
    # use the unstructured loader to initialize the data source for RAG
docs=UnstructuredCSVLoader('/tmp/context.csv').load()
    # voilà, we have our loader; now we summarize the user's content so another
    # model can use it as context for a very specific, personalized response.
    # Define the summarization prompt using Directional Stimulus Prompting,
    # which supplies stimulus/hints (keywords, in this case relating to the client's
    # ideal workout/lifestyle mantra) to give GPT-4 fine-grained, question-specific
    # guidance when generating the summary
prompt_template = """Write a concise summary of the following client's workout goals based on the hint:
"{text}"
Hint: Goals; Change; Optimization; Improvement; Consistency; Frequency; Healthy; Unhealthy; Weight Loss; Fat Loss; Weight Gain; Muscle Gain; Diet; Eating; Sleep.
CONCISE SUMMARY:"""
prompt = PromptTemplate.from_template(prompt_template)
llm_chain = LLMChain(llm=chat_llm, prompt=prompt)
# Define StuffDocumentsChain
summary_chain = StuffDocumentsChain(
llm_chain=llm_chain, document_variable_name="text"
)
user_workout_context=summary_chain.run(docs)
    # This is the final LLMChain prompt template, which serves as the system prompt and drives the purpose of our daily motivational messages
temp="""
You are BolicBot, a super passionate and intelligent fitness coach and motivator,
who can capture the hearts and minds of your clients by helping them push out of their comfort zone.
Your sole purpose for existence is to drive people to be better versions of themselves.
Using your client's information here: {context} , and their name:{name}, send them a very short and concise text,
motivating them to push through their very specific problems.
"""
prompt=PromptTemplate(input_variables=["context", "name"], template=temp)
prompt.format(context=user_workout_context, name=first_name)
chain=LLMChain(llm=chat_llm, prompt=prompt)
return chain.run({"context": user_workout_context, "name": first_name})
for each_client in client_programs:
answer=fetch_user(each_client[2], each_client[1])
client.messages.create(to=each_client[0], from_=os.getenv("BOLIC_NUMBER"), body=answer)
cursor.close()
connection.close()
| [
"langchain.chat_models.ChatOpenAI",
"langchain.PromptTemplate",
"langchain.document_loaders.csv_loader.UnstructuredCSVLoader",
"langchain.PromptTemplate.from_template",
"langchain.LLMChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain"
] | [((402, 430), 'pymysql.install_as_MySQLdb', 'pymysql.install_as_MySQLdb', ([], {}), '()\n', (428, 430), False, 'import pymysql\n'), ((433, 446), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (444, 446), False, 'from dotenv import load_dotenv\n'), ((465, 494), 'os.getenv', 'os.getenv', (['"""OPENAI_API_TOKEN"""'], {}), "('OPENAI_API_TOKEN')\n", (474, 494), False, 'import os\n'), ((811, 842), 'twilio.rest.Client', 'Client', (['account_sid', 'auth_token'], {}), '(account_sid, auth_token)\n', (817, 842), False, 'from twilio.rest import Client\n'), ((854, 912), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4"""', 'openai_api_key': 'OPENAI_API_TOKEN'}), "(model='gpt-4', openai_api_key=OPENAI_API_TOKEN)\n", (864, 912), False, 'from langchain.chat_models import ChatOpenAI\n'), ((953, 1126), 'pymysql.connect', 'pymysql.connect', ([], {'host': "os.environ['DB_HOST']", 'user': "os.environ['DB_USERNAME']", 'password': "os.environ['DB_PASSWORD']", 'database': "os.environ['DATABASE']", 'ssl_ca': '"""./cert.pem"""'}), "(host=os.environ['DB_HOST'], user=os.environ['DB_USERNAME'],\n password=os.environ['DB_PASSWORD'], database=os.environ['DATABASE'],\n ssl_ca='./cert.pem')\n", (968, 1126), False, 'import pymysql\n'), ((1385, 1428), 'gspread.service_account', 'gspread.service_account', (['"""credentials.json"""'], {}), "('credentials.json')\n", (1408, 1428), False, 'import gspread\n'), ((3246, 3291), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (3274, 3291), False, 'from langchain import PromptTemplate, LLMChain\n'), ((3312, 3349), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'chat_llm', 'prompt': 'prompt'}), '(llm=chat_llm, prompt=prompt)\n', (3320, 3349), False, 'from langchain import PromptTemplate, LLMChain\n'), ((3413, 3484), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_variable_name': '"""text"""'}), "(llm_chain=llm_chain, document_variable_name='text')\n", (3432, 3484), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((4272, 4338), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'name']", 'template': 'temp'}), "(input_variables=['context', 'name'], template=temp)\n", (4286, 4338), False, 'from langchain import PromptTemplate, LLMChain\n'), ((4422, 4459), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'chat_llm', 'prompt': 'prompt'}), '(llm=chat_llm, prompt=prompt)\n', (4430, 4459), False, 'from langchain import PromptTemplate, LLMChain\n'), ((2216, 2229), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2226, 2229), False, 'import csv\n'), ((2376, 2417), 'langchain.document_loaders.csv_loader.UnstructuredCSVLoader', 'UnstructuredCSVLoader', (['"""/tmp/context.csv"""'], {}), "('/tmp/context.csv')\n", (2397, 2417), False, 'from langchain.document_loaders.csv_loader import UnstructuredCSVLoader\n'), ((4692, 4717), 'os.getenv', 'os.getenv', (['"""BOLIC_NUMBER"""'], {}), "('BOLIC_NUMBER')\n", (4701, 4717), False, 'import os\n')] |
import time #← import the time module to measure execution time
import langchain
from langchain.cache import InMemoryCache #← import InMemoryCache
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
langchain.llm_cache = InMemoryCache() #← set an InMemoryCache as llm_cache
chat = ChatOpenAI()
start = time.time() #← record the start time
result = chat([ #← run the first call
HumanMessage(content="こんにちは!")
])
end = time.time() #← record the end time
print(result.content)
print(f"実行時間: {end - start}秒")
start = time.time() #← 実行開始時間を記録
result = chat([ #← 同じ内容で二度目の実行を行うことでキャッシュが利用され、即時に実行完了している
HumanMessage(content="こんにちは!")
])
end = time.time() #← record the end time
print(result.content)
print(f"実行時間: {end - start}秒")
| [
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.InMemoryCache"
] | [((237, 252), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (250, 252), False, 'from langchain.cache import InMemoryCache\n'), ((291, 303), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (301, 303), False, 'from langchain.chat_models import ChatOpenAI\n'), ((312, 323), 'time.time', 'time.time', ([], {}), '()\n', (321, 323), False, 'import time\n'), ((411, 422), 'time.time', 'time.time', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n'), ((627, 638), 'time.time', 'time.time', ([], {}), '()\n', (636, 638), False, 'import time\n'), ((370, 400), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (382, 400), False, 'from langchain.schema import HumanMessage\n'), ((586, 616), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (598, 616), False, 'from langchain.schema import HumanMessage\n')] |
import langchain
import openai
from dotenv import load_dotenv
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.schema import HumanMessage
load_dotenv()
langchain.verbose = True
# openai.log = "debug"
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
memory = ConversationBufferMemory()
memory.chat_memory.add_user_message("Hi! I'm Oshima.")
memory.chat_memory.add_ai_message("Whats up?")
# If you use ConversationChain as in the Memory tutorial, as shown below,
# the entire history ends up in role: user, which is no longer an appropriate use of the Chat Completions API
conversation = ConversationChain(llm=chat, memory=memory)
conversation_result = conversation.predict(input="Do you know my name?")
print(conversation_result)
# If you provide the history yourself, the prompt makes use of role: assistant and becomes an appropriate Chat Completions API prompt
# messages = memory.chat_memory.messages
# messages.append(HumanMessage(content="Do you know my name?"))
# raw_chat_result = chat(messages)
# print(raw_chat_result.content)
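# For reference, after the two add_* calls above, memory.chat_memory.messages holds
# [HumanMessage(content="Hi! I'm Oshima."), AIMessage(content="Whats up?")], so passing
# that list to chat() directly replays the history as separate user/assistant turns.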
| [
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.ConversationChain"
] | [((251, 264), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (262, 264), False, 'from dotenv import load_dotenv\n'), ((322, 375), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (332, 375), False, 'from langchain.chat_models import ChatOpenAI\n'), ((386, 412), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (410, 412), False, 'from langchain.memory import ConversationBufferMemory\n'), ((647, 689), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'chat', 'memory': 'memory'}), '(llm=chat, memory=memory)\n', (664, 689), False, 'from langchain.chains import ConversationChain\n')] |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Coroutine,
Dict,
Generator,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID
from tenacity import RetryCallState
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers import run_collector
from langchain.callbacks.tracers.langchain import (
LangChainTracer,
)
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
AgentAction,
AgentFinish,
Document,
LLMResult,
)
from langchain.schema.messages import BaseMessage, get_buffer_string
from langchain.schema.output import ChatGenerationChunk, GenerationChunk
if TYPE_CHECKING:
from langsmith import Client as LangSmithClient
logger = logging.getLogger(__name__)
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[
Optional[LangChainTracerV1]
] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
wandb_tracing_callback_var: ContextVar[
Optional[WandbTracer]
] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[
Optional[LangChainTracer]
] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
run_collector_var: ContextVar[
Optional[run_collector.RunCollectorCallbackHandler]
] = ContextVar( # noqa: E501
"run_collector", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get the OpenAI callback handler in a context manager.
which conveniently exposes token and cost information.
Returns:
OpenAICallbackHandler: The OpenAI callback handler.
Example:
>>> with get_openai_callback() as cb:
... # Use the OpenAI callback handler
"""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get the Deprecated LangChainTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
TracerSessionV1: The LangChainTracer session.
Example:
>>> with tracing_enabled() as session:
... # Use the LangChainTracer session
"""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get the WandbTracer in a context manager.
Args:
session_name (str, optional): The name of the session.
Defaults to "default".
Returns:
None
Example:
>>> with wandb_tracing_enabled() as session:
... # Use the WandbTracer session
"""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
project_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tags: Optional[List[str]] = None,
client: Optional[LangSmithClient] = None,
) -> Generator[LangChainTracer, None, None]:
"""Instruct LangChain to log all runs in context to LangSmith.
Args:
project_name (str, optional): The name of the project.
Defaults to "default".
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
tags (List[str], optional): The tags to add to the run.
Defaults to None.
Returns:
None
Example:
>>> with tracing_v2_enabled():
... # LangChain code will automatically be traced
You can use this to fetch the LangSmith run URL:
>>> with tracing_v2_enabled() as cb:
... chain.invoke("foo")
... run_url = cb.get_run_url()
"""
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
example_id=example_id,
project_name=project_name,
tags=tags,
client=client,
)
tracing_v2_callback_var.set(cb)
yield cb
tracing_v2_callback_var.set(None)
@contextmanager
def collect_runs() -> Generator[run_collector.RunCollectorCallbackHandler, None, None]:
"""Collect all run traces in context.
Returns:
run_collector.RunCollectorCallbackHandler: The run collector callback handler.
Example:
>>> with collect_runs() as runs_cb:
chain.invoke("foo")
run_id = runs_cb.traced_runs[0].id
"""
cb = run_collector.RunCollectorCallbackHandler()
run_collector_var.set(cb)
yield cb
run_collector_var.set(None)
@contextmanager
def trace_as_chain_group(
group_name: str,
callback_manager: Optional[CallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> Generator[CallbackManagerForChainGroup, None, None]:
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (CallbackManager, optional): The callback manager to use.
inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
CallbackManagerForChainGroup: The callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the callback manager for the chain group
res = llm.predict(llm_input, callbacks=manager)
manager.on_chain_end({"output": res})
""" # noqa: E501
cb = cast(
Callbacks,
[
LangChainTracer(
project_name=project_name,
example_id=example_id,
)
]
if callback_manager is None
else callback_manager,
)
cm = CallbackManager.configure(
inheritable_callbacks=cb,
inheritable_tags=tags,
)
run_manager = cm.on_chain_start({"name": group_name}, inputs or {}, run_id=run_id)
child_cm = run_manager.get_child()
group_cm = CallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
run_manager.on_chain_end({})
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
callback_manager: Optional[AsyncCallbackManager] = None,
*,
inputs: Optional[Dict[str, Any]] = None,
project_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
) -> AsyncGenerator[AsyncCallbackManagerForChainGroup, None]:
"""Get an async callback manager for a chain group in a context manager.
Useful for grouping different async calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (AsyncCallbackManager, optional): The async callback manager to use,
which manages tracing and other callback behavior.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Returns:
AsyncCallbackManager: The async callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
async with atrace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the async callback manager for the chain group
res = await llm.apredict(llm_input, callbacks=manager)
await manager.on_chain_end({"output": res})
""" # noqa: E501
cb = cast(
Callbacks,
[
LangChainTracer(
project_name=project_name,
example_id=example_id,
)
]
if callback_manager is None
else callback_manager,
)
cm = AsyncCallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)
run_manager = await cm.on_chain_start(
{"name": group_name}, inputs or {}, run_id=run_id
)
child_cm = run_manager.get_child()
group_cm = AsyncCallbackManagerForChainGroup(
child_cm.handlers,
child_cm.inheritable_handlers,
child_cm.parent_run_id,
parent_run_manager=run_manager,
tags=child_cm.tags,
inheritable_tags=child_cm.inheritable_tags,
metadata=child_cm.metadata,
inheritable_metadata=child_cm.inheritable_metadata,
)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
await run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
await run_manager.on_chain_end({})
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
coros: List[Coroutine[Any, Any, Any]] = []
try:
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
event = getattr(handler, event_name)(*args, **kwargs)
if asyncio.iscoroutine(event):
coros.append(event)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
handler_name = handler.__class__.__name__
logger.warning(
f"NotImplementedError in {handler_name}.{event_name}"
f" callback: {e}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
if handler.raise_error:
raise e
finally:
if coros:
try:
# Raises RuntimeError if there is no current event loop.
asyncio.get_running_loop()
loop_running = True
except RuntimeError:
loop_running = False
if loop_running:
# If we try to submit this coroutine to the running loop
# we end up in a deadlock, as we'd have gotten here from a
# running coroutine, which we cannot interrupt to run this one.
# The solution is to create a new loop in a new thread.
with ThreadPoolExecutor(1) as executor:
executor.submit(_run_coros, coros).result()
else:
_run_coros(coros)
def _run_coros(coros: List[Coroutine[Any, Any, Any]]) -> None:
if hasattr(asyncio, "Runner"):
# Python 3.11+
# Run the coroutines in a new event loop, taking care to
# - install signal handlers
# - run pending tasks scheduled by `coros`
# - close asyncgens and executors
# - close the loop
with asyncio.Runner() as runner:
# Run the coroutine, get the result
for coro in coros:
runner.run(coro)
# Run pending tasks scheduled by coros until they are all done
while pending := asyncio.all_tasks(runner.get_loop()):
runner.run(asyncio.wait(pending))
else:
# Before Python 3.11 we need to run each coroutine in a new event loop
# as the Runner api is not available.
for coro in coros:
asyncio.run(coro)
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
if handler.run_inline:
event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(
f"NotImplementedError in {handler.__class__.__name__}.{event_name}"
f" callback: {e}"
)
except Exception as e:
logger.warning(
f"Error in {handler.__class__.__name__}.{event_name} callback: {e}"
)
if handler.raise_error:
raise e
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
for handler in [h for h in handlers if h.run_inline]:
await _ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
if not handler.run_inline
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
*,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
inheritable_tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
) -> None:
"""Initialize the run manager.
Args:
run_id (UUID): The ID of the run.
handlers (List[BaseCallbackHandler]): The list of handlers.
inheritable_handlers (List[BaseCallbackHandler]):
The list of inheritable handlers.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
tags (Optional[List[str]]): The list of tags.
inheritable_tags (Optional[List[str]]): The list of inheritable tags.
metadata (Optional[Dict[str, Any]]): The metadata.
inheritable_metadata (Optional[Dict[str, Any]]): The inheritable metadata.
"""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
self.tags = tags or []
self.inheritable_tags = inheritable_tags or []
self.metadata = metadata or {}
self.inheritable_metadata = inheritable_metadata or {}
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations.
Returns:
BaseRunManager: The noop manager.
"""
return cls(
run_id=uuid.uuid4(),
handlers=[],
inheritable_handlers=[],
tags=[],
inheritable_tags=[],
metadata={},
inheritable_metadata={},
)
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
_handle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class ParentRunManager(RunManager):
"""Sync Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> CallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
CallbackManager: The child callback manager.
"""
manager = CallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received.
Args:
text (str): The received text.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retry(
self,
retry_state: RetryCallState,
**kwargs: Any,
) -> None:
await _ahandle_event(
self.handlers,
"on_retry",
"ignore_retry",
retry_state,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncParentRunManager(AsyncRunManager):
"""Async Parent Run Manager."""
def get_child(self, tag: Optional[str] = None) -> AsyncCallbackManager:
"""Get a child callback manager.
Args:
tag (str, optional): The tag for the child callback manager.
Defaults to None.
Returns:
AsyncCallbackManager: The child callback manager.
"""
manager = AsyncCallbackManager(handlers=[], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
manager.add_tags(self.inheritable_tags)
manager.add_metadata(self.inheritable_metadata)
if tag is not None:
manager.add_tags([tag], False)
return manager
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
chunk=chunk,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token.
Args:
token (str): The new token.
"""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
chunk=chunk,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The LLM result.
"""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_llm_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when LLM errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForChainRun(ParentRunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncParentRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when chain ends running.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received.
Args:
action (AgentAction): The agent action.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received.
Args:
finish (AgentFinish): The agent finish.
Returns:
Any: The result of the callback.
"""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncParentRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_tool_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when tool errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManagerForRetrieverRun(ParentRunManager, RetrieverManagerMixin):
"""Callback manager for retriever run."""
def on_retriever_end(
self,
documents: Sequence[Document],
**kwargs: Any,
) -> None:
"""Run when retriever ends running."""
_handle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
_handle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class AsyncCallbackManagerForRetrieverRun(
AsyncParentRunManager,
RetrieverManagerMixin,
):
"""Async callback manager for retriever run."""
async def on_retriever_end(
self, documents: Sequence[Document], **kwargs: Any
) -> None:
"""Run when retriever ends running."""
await _ahandle_event(
self.handlers,
"on_retriever_end",
"ignore_retriever",
documents,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
async def on_retriever_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when retriever errors."""
await _ahandle_event(
self.handlers,
"on_retriever_error",
"ignore_retriever",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that handles callbacks from LangChain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
prompt as an LLM run.
"""
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as an LLM run.
"""
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
managers.append(
CallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
return managers
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
CallbackManagerForChainRun: The callback manager for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run. Defaults to None.
Returns:
CallbackManagerForToolRun: The callback manager for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
_handle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return CallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> CallbackManager:
"""Configure the callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
CallbackManager: The configured callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
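    # A minimal sketch of how a caller (e.g. a Chain or LLM) typically builds its
    # run-scoped manager via configure(); the handler and tag names below are
    # illustrative only:
    #
    #   callback_manager = CallbackManager.configure(
    #       inheritable_callbacks=callbacks_from_parent_run,
    #       local_callbacks=[StdOutCallbackHandler()],
    #       verbose=True,
    #       local_tags=["my-chain"],
    #   )
    #   run_manager = callback_manager.on_chain_start({"name": "my_chain"}, inputs)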
class CallbackManagerForChainGroup(CallbackManager):
"""Callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler] | None = None,
parent_run_id: UUID | None = None,
*,
parent_run_manager: CallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
def on_chain_end(self, outputs: Union[Dict[str, Any], Any], **kwargs: Any) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
return self.parent_run_manager.on_chain_end(outputs, **kwargs)
def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
return self.parent_run_manager.on_chain_error(error, **kwargs)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that handles callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The list of prompts.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of async
callback managers, one for each LLM Run corresponding
to each prompt.
"""
tasks = []
managers = []
for prompt in prompts:
run_id_ = uuid.uuid4()
tasks.append(
_ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
[prompt],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any,
) -> List[AsyncCallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[AsyncCallbackManagerForLLMRun]: The list of
async callback managers, one for each LLM Run
corresponding to each inner message list.
"""
tasks = []
managers = []
for message_list in messages:
run_id_ = uuid.uuid4()
tasks.append(
_ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
[message_list],
run_id=run_id_,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
)
managers.append(
AsyncCallbackManagerForLLMRun(
run_id=run_id_,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
)
await asyncio.gather(*tasks)
return managers
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Union[Dict[str, Any], Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Union[Dict[str, Any], Any]): The inputs to the chain.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
AsyncCallbackManagerForChainRun: The async callback manager
for the chain run.
"""
if run_id is None:
run_id = uuid.uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input to the tool.
run_id (UUID, optional): The ID of the run. Defaults to None.
parent_run_id (UUID, optional): The ID of the parent run.
Defaults to None.
Returns:
AsyncCallbackManagerForToolRun: The async callback manager
for the tool run.
"""
if run_id is None:
run_id = uuid.uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
async def on_retriever_start(
self,
serialized: Dict[str, Any],
query: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForRetrieverRun:
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
await _ahandle_event(
self.handlers,
"on_retriever_start",
"ignore_retriever",
serialized,
query,
run_id=run_id,
parent_run_id=self.parent_run_id,
tags=self.tags,
metadata=self.metadata,
**kwargs,
)
return AsyncCallbackManagerForRetrieverRun(
run_id=run_id,
handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers,
parent_run_id=self.parent_run_id,
tags=self.tags,
inheritable_tags=self.inheritable_tags,
metadata=self.metadata,
inheritable_metadata=self.inheritable_metadata,
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(
cls,
inheritable_callbacks,
local_callbacks,
verbose,
inheritable_tags,
local_tags,
inheritable_metadata,
local_metadata,
)
class AsyncCallbackManagerForChainGroup(AsyncCallbackManager):
"""Async callback manager for the chain group."""
def __init__(
self,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler] | None = None,
parent_run_id: UUID | None = None,
*,
parent_run_manager: AsyncCallbackManagerForChainRun,
**kwargs: Any,
) -> None:
super().__init__(
handlers,
inheritable_handlers,
parent_run_id,
**kwargs,
)
self.parent_run_manager = parent_run_manager
self.ended = False
async def on_chain_end(
self, outputs: Union[Dict[str, Any], Any], **kwargs: Any
) -> None:
"""Run when traced chain group ends.
Args:
outputs (Union[Dict[str, Any], Any]): The outputs of the chain.
"""
self.ended = True
await self.parent_run_manager.on_chain_end(outputs, **kwargs)
async def on_chain_error(
self,
error: BaseException,
**kwargs: Any,
) -> None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
await self.parent_run_manager.on_chain_error(error, **kwargs)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
inheritable_tags: Optional[List[str]] = None,
local_tags: Optional[List[str]] = None,
inheritable_metadata: Optional[Dict[str, Any]] = None,
local_metadata: Optional[Dict[str, Any]] = None,
) -> T:
"""Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
T: The configured callback manager.
"""
callback_manager = callback_manager_cls(handlers=[])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers.copy(),
inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(),
parent_run_id=inheritable_callbacks.parent_run_id,
tags=inheritable_callbacks.tags.copy(),
inheritable_tags=inheritable_callbacks.inheritable_tags.copy(),
metadata=inheritable_callbacks.metadata.copy(),
inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(),
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
if inheritable_tags or local_tags:
callback_manager.add_tags(inheritable_tags or [])
callback_manager.add_tags(local_tags or [], False)
if inheritable_metadata or local_metadata:
callback_manager.add_metadata(inheritable_metadata or {})
callback_manager.add_metadata(local_metadata or {}, False)
tracer = tracing_callback_var.get()
wandb_tracer = wandb_tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING")
or tracer is not None
or env_var_is_set("LANGCHAIN_HANDLER")
)
wandb_tracing_enabled_ = (
env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None
)
tracer_project = os.environ.get(
"LANGCHAIN_PROJECT", os.environ.get("LANGCHAIN_SESSION", "default")
)
run_collector_ = run_collector_var.get()
debug = _get_debug()
if (
verbose
or debug
or tracing_enabled_
or tracing_v2_enabled_
or wandb_tracing_enabled_
or open_ai is not None
):
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_project)
callback_manager.add_handler(handler, True)
if wandb_tracing_enabled_ and not any(
isinstance(handler, WandbTracer) for handler in callback_manager.handlers
):
if wandb_tracer:
callback_manager.add_handler(wandb_tracer, True)
else:
handler = WandbTracer()
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(project_name=tracer_project)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
if open_ai is not None and not any(
handler is open_ai # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
if run_collector_ is not None and not any(
handler is run_collector_ # direct pointer comparison
for handler in callback_manager.handlers
):
callback_manager.add_handler(run_collector_, False)
return callback_manager
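# --- Illustrative usage sketch added by the editor; not part of the original module. ---
# A minimal example of how _configure is typically driven (the public
# CallbackManager.configure classmethod delegates to it); the handler, tag, and
# function name below are chosen only for demonstration.
def _example_configure_usage() -> CallbackManager:
    local_handler = StdOutCallbackHandler()
    manager = _configure(
        CallbackManager,
        inheritable_callbacks=None,
        local_callbacks=[local_handler],
        verbose=False,
        inheritable_tags=["example-tag"],
    )
    # The local handler is attached to this manager but is not inherited by child
    # run managers, while the inheritable tag is recorded on the manager itself.
    assert local_handler in manager.handlers
    assert "example-tag" in manager.tags
    return manager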
| [
"langchain.schema.messages.get_buffer_string",
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.tracers.run_collector.RunCollectorCallbackHandler",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler"
] | [((1530, 1557), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1547, 1557), False, 'import logging\n'), ((1626, 1669), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1636, 1669), False, 'from contextvars import ContextVar\n'), ((1746, 1790), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1756, 1790), False, 'from contextvars import ContextVar\n'), ((1881, 1931), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_wandb_callback"""'], {'default': 'None'}), "('tracing_wandb_callback', default=None)\n", (1891, 1931), False, 'from contextvars import ContextVar\n'), ((2024, 2071), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (2034, 2071), False, 'from contextvars import ContextVar\n'), ((2183, 2224), 'contextvars.ContextVar', 'ContextVar', (['"""run_collector"""'], {'default': 'None'}), "('run_collector', default=None)\n", (2193, 2224), False, 'from contextvars import ContextVar\n'), ((16857, 16895), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (16864, 16895), False, 'from typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast\n'), ((55286, 55337), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (55293, 55337), False, 'from typing import TYPE_CHECKING, Any, AsyncGenerator, Coroutine, Dict, Generator, List, Optional, Sequence, Type, TypeVar, Union, cast\n'), ((2714, 2737), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (2735, 2737), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((3303, 3322), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (3320, 3322), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((3905, 3918), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (3916, 3918), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((5066, 5161), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'example_id': 'example_id', 'project_name': 'project_name', 'tags': 'tags', 'client': 'client'}), '(example_id=example_id, project_name=project_name, tags=tags,\n client=client)\n', (5081, 5161), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((5695, 5738), 'langchain.callbacks.tracers.run_collector.RunCollectorCallbackHandler', 'run_collector.RunCollectorCallbackHandler', ([], {}), '()\n', (5736, 5738), False, 'from langchain.callbacks.tracers import run_collector\n'), ((5040, 5056), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (5044, 5056), False, 'from uuid import UUID\n'), ((59305, 59351), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""', '"""default"""'], {}), "('LANGCHAIN_SESSION', 'default')\n", (59319, 59351), False, 'import os\n'), ((14159, 14175), 'asyncio.Runner', 'asyncio.Runner', ([], {}), '()\n', (14173, 14175), False, 'import asyncio\n'), ((14666, 14683), 'asyncio.run', 'asyncio.run', (['coro'], {}), '(coro)\n', (14677, 14683), 
False, 'import asyncio\n'), ((15029, 15063), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (15056, 15063), False, 'import asyncio\n'), ((34984, 34996), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (34994, 34996), False, 'import uuid\n'), ((36584, 36596), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (36594, 36596), False, 'import uuid\n'), ((38168, 38180), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (38178, 38180), False, 'import uuid\n'), ((39631, 39643), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (39641, 39643), False, 'import uuid\n'), ((40711, 40723), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (40721, 40723), False, 'import uuid\n'), ((45328, 45340), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (45338, 45340), False, 'import uuid\n'), ((46333, 46355), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (46347, 46355), False, 'import asyncio\n'), ((47153, 47165), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47163, 47165), False, 'import uuid\n'), ((48178, 48200), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (48192, 48200), False, 'import asyncio\n'), ((48907, 48919), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (48917, 48919), False, 'import uuid\n'), ((50437, 50449), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (50447, 50449), False, 'import uuid\n'), ((51540, 51552), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (51550, 51552), False, 'import uuid\n'), ((7492, 7557), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name', 'example_id': 'example_id'}), '(project_name=project_name, example_id=example_id)\n', (7507, 7557), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((10252, 10317), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'project_name', 'example_id': 'example_id'}), '(project_name=project_name, example_id=example_id)\n', (10267, 10317), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((13167, 13193), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (13191, 13193), False, 'import asyncio\n'), ((18728, 18740), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18738, 18740), False, 'import uuid\n'), ((60081, 60105), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (60103, 60105), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((60395, 60414), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (60412, 60414), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((60810, 60823), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (60821, 60823), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((11911, 11937), 'asyncio.iscoroutine', 'asyncio.iscoroutine', (['event'], {}), '(event)\n', (11930, 11937), False, 'import asyncio\n'), ((13651, 13672), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(1)'], {}), '(1)\n', (13669, 13672), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((14469, 14490), 'asyncio.wait', 'asyncio.wait', (['pending'], {}), '(pending)\n', (14481, 14490), False, 'import asyncio\n'), ((15510, 15530), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (15527, 15530), 
False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n'), ((59858, 59881), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (59879, 59881), False, 'from langchain.callbacks.stdout import StdOutCallbackHandler\n'), ((61198, 61242), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'project_name': 'tracer_project'}), '(project_name=tracer_project)\n', (61213, 61242), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((15330, 15371), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (15347, 15371), False, 'import functools\n'), ((12175, 12195), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (12192, 12195), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n'), ((15258, 15282), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15280, 15282), False, 'import asyncio\n')] |
import os
import streamlit as st
import time
import langchain
from langchain.chains import RetrievalQAWithSourcesChain, RetrievalQA
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredURLLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAI, OpenAIEmbeddings
import configparser
config = configparser.RawConfigParser()
config.read('../config.config')
openapi_key = config.get('Keys', 'openapi_key')
os.environ['OPENAI_API_KEY'] = openapi_key
st.title("URL Insighter 🔗🔍")
st.sidebar.title("🔗URLs...")
urls = []
for i in range(3):
url = st.sidebar.text_input(f"URL {i+1}")
urls.append(url)
folder_name = st.sidebar.text_input('Title')
process_url_clicked = st.sidebar.button("Process URLs")
main_placeholder = st.empty()
llm = OpenAI(temperature=0.9, max_tokens=500)
if process_url_clicked:
# load data
loader = UnstructuredURLLoader(urls=urls)
main_placeholder.text("Data Loading...Started...✅✅✅")
data = loader.load()
# split data
text_splitter = RecursiveCharacterTextSplitter(
separators=['\n\n', '\n', '.', ','],
chunk_size=1000
)
main_placeholder.text("Text Splitter...Started...✅✅✅")
docs = text_splitter.split_documents(data)
embeddings = OpenAIEmbeddings()
vectorstore_openai = FAISS.from_documents(docs, embeddings)
main_placeholder.text("Embedding Vector Started Building...✅✅✅")
time.sleep(2)
vectorstore_openai.save_local(folder_name)
query = main_placeholder.text_input("Question: ")
if query:
if os.path.exists(folder_name):
vectorstore = FAISS.load_local(folder_name, OpenAIEmbeddings())
chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorstore.as_retriever())
result = chain({"question": query}, return_only_outputs=True)
# result will be a dictionary of this format --> {"answer": "", "sources": [] }
st.header("Answer")
st.write(result["answer"])
sources = result.get("sources", "")
if sources:
st.subheader("Sources:")
sources_list = sources.split("\n") # Split the sources by newline
for source in sources_list:
st.write(source)
| [
"langchain_community.document_loaders.UnstructuredURLLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.vectorstores.FAISS.from_documents",
"langchain_openai.OpenAI",
"langchain_openai.OpenAIEmbeddings"
] | [((485, 515), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (513, 515), False, 'import configparser\n'), ((640, 668), 'streamlit.title', 'st.title', (['"""URL Insighter 🔗🔍"""'], {}), "('URL Insighter 🔗🔍')\n", (648, 668), True, 'import streamlit as st\n'), ((669, 697), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""🔗URLs..."""'], {}), "('🔗URLs...')\n", (685, 697), True, 'import streamlit as st\n'), ((810, 840), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Title"""'], {}), "('Title')\n", (831, 840), True, 'import streamlit as st\n'), ((863, 896), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Process URLs"""'], {}), "('Process URLs')\n", (880, 896), True, 'import streamlit as st\n'), ((917, 927), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (925, 927), True, 'import streamlit as st\n'), ((934, 973), 'langchain_openai.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)', 'max_tokens': '(500)'}), '(temperature=0.9, max_tokens=500)\n', (940, 973), False, 'from langchain_openai import OpenAI, OpenAIEmbeddings\n'), ((738, 775), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['f"""URL {i + 1}"""'], {}), "(f'URL {i + 1}')\n", (759, 775), True, 'import streamlit as st\n'), ((1028, 1060), 'langchain_community.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': 'urls'}), '(urls=urls)\n', (1049, 1060), False, 'from langchain_community.document_loaders import UnstructuredURLLoader\n'), ((1181, 1269), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'separators': "['\\n\\n', '\\n', '.', ',']", 'chunk_size': '(1000)'}), "(separators=['\\n\\n', '\\n', '.', ','],\n chunk_size=1000)\n", (1211, 1269), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1411, 1429), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1427, 1429), False, 'from langchain_openai import OpenAI, OpenAIEmbeddings\n'), ((1455, 1493), 'langchain_community.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (1475, 1493), False, 'from langchain_community.vectorstores import FAISS\n'), ((1567, 1580), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1577, 1580), False, 'import time\n'), ((1697, 1724), 'os.path.exists', 'os.path.exists', (['folder_name'], {}), '(folder_name)\n', (1711, 1724), False, 'import os\n'), ((2064, 2083), 'streamlit.header', 'st.header', (['"""Answer"""'], {}), "('Answer')\n", (2073, 2083), True, 'import streamlit as st\n'), ((2092, 2118), 'streamlit.write', 'st.write', (["result['answer']"], {}), "(result['answer'])\n", (2100, 2118), True, 'import streamlit as st\n'), ((1778, 1796), 'langchain_openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1794, 1796), False, 'from langchain_openai import OpenAI, OpenAIEmbeddings\n'), ((2196, 2220), 'streamlit.subheader', 'st.subheader', (['"""Sources:"""'], {}), "('Sources:')\n", (2208, 2220), True, 'import streamlit as st\n'), ((2356, 2372), 'streamlit.write', 'st.write', (['source'], {}), '(source)\n', (2364, 2372), True, 'import streamlit as st\n')] |
from PyPDF2 import PdfReader
import os
import pandas as pd
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.cache import InMemoryCache
import langchain
from ocr.ocr import process_pdf
from docs.search import doc_search
from extract.extract import knowledge_graph
from data.processing import rowify
from edi.edi_formatter import pandas_to_edi
from openai.error import InvalidRequestError
langchain.llm_cache = InMemoryCache()
llm = OpenAI(temperature=0)
embeddings = OpenAIEmbeddings()
chain = load_qa_chain(llm, chain_type="stuff")
pdf_inputs = []
key_ids = ""
query = f"""
Using the unique count of {key_ids} in this document, do the following:
For each {key_ids}, extract the following information corresponding to the {key_ids}:
"""
rules_template = f"""Just give me the answer with {key_ids} line separated and nothing else."""
pdf_data = []
pdf_dir = '/pdfs/'
fils = os.listdir(pdf_dir)
for fil in fils:
print("processing: " + fil)
try:
pdf_file = pdf_dir + fil
texts = process_pdf(pdf_file)
docsearch = doc_search(texts, embeddings)
hwb_data = knowledge_graph(
key_id="",
docsearch=docsearch,
pdf_inputs=pdf_inputs,
query=query,
rules_template=rules_template,
chain=chain
)
mwb = fil.split('-')[1]
rows = rowify(hwb_data, extra=[mwb])
pdf_data.extend(rows)
except InvalidRequestError:
print(fil, "File needs handler.")
cols = []
df = pd.DataFrame(columns=cols, data=pdf_data)
edi_data = pandas_to_edi(
edi_type='211',
df=df,
edi_key_col="",
edi_data_col="",
) | [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.llms.OpenAI",
"langchain.chains.question_answering.load_qa_chain",
"langchain.cache.InMemoryCache"
] | [((634, 649), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (647, 649), False, 'from langchain.cache import InMemoryCache\n'), ((656, 677), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (662, 677), False, 'from langchain.llms import OpenAI\n'), ((692, 710), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (708, 710), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((720, 758), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (733, 758), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1113, 1132), 'os.listdir', 'os.listdir', (['pdf_dir'], {}), '(pdf_dir)\n', (1123, 1132), False, 'import os\n'), ((1741, 1782), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols', 'data': 'pdf_data'}), '(columns=cols, data=pdf_data)\n', (1753, 1782), True, 'import pandas as pd\n'), ((1795, 1864), 'edi.edi_formatter.pandas_to_edi', 'pandas_to_edi', ([], {'edi_type': '"""211"""', 'df': 'df', 'edi_key_col': '""""""', 'edi_data_col': '""""""'}), "(edi_type='211', df=df, edi_key_col='', edi_data_col='')\n", (1808, 1864), False, 'from edi.edi_formatter import pandas_to_edi\n'), ((1241, 1262), 'ocr.ocr.process_pdf', 'process_pdf', (['pdf_file'], {}), '(pdf_file)\n', (1252, 1262), False, 'from ocr.ocr import process_pdf\n'), ((1283, 1312), 'docs.search.doc_search', 'doc_search', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1293, 1312), False, 'from docs.search import doc_search\n'), ((1332, 1463), 'extract.extract.knowledge_graph', 'knowledge_graph', ([], {'key_id': '""""""', 'docsearch': 'docsearch', 'pdf_inputs': 'pdf_inputs', 'query': 'query', 'rules_template': 'rules_template', 'chain': 'chain'}), "(key_id='', docsearch=docsearch, pdf_inputs=pdf_inputs,\n query=query, rules_template=rules_template, chain=chain)\n", (1347, 1463), False, 'from extract.extract import knowledge_graph\n'), ((1590, 1619), 'data.processing.rowify', 'rowify', (['hwb_data'], {'extra': '[mwb]'}), '(hwb_data, extra=[mwb])\n', (1596, 1619), False, 'from data.processing import rowify\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/2/24 16:23
# @Author : Jack
# @File : main.py
# @Software: PyCharm
import asyncio
import logging
import socket
import sys
import consul
import langchain
import os
import grpc
from langchain import PromptTemplate, LLMChain
from langchain.chat_models import ChatOpenAI
from proto import chatgpt_pb2_grpc, chatgpt_pb2
from callback import StreamingLLMCallbackHandler
default_port = 8099
def get_host_ip():
"""
    Query the local machine's IP address.
:return: ip
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
def register_service(consul_addr: str, consul_port: int, srv_port: int) -> consul.Consul:
local_ip = get_host_ip()
client = consul.Consul(host=consul_addr, port=consul_port, verify=False)
client.agent.service.register(
name="chatgpt",
address=local_ip,
port=srv_port,
service_id=f"chatgpt-{local_ip}",
timeout=10
)
return client
def init_chatgpt() -> langchain.LLMChain:
llm = ChatOpenAI(streaming=True, verbose=True, temperature=0.6)
# Get prompt template
template = ("""Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
{human_input}
Assistant:""")
chat_prompt = PromptTemplate(
input_variables=["human_input"],
template=template
)
# Construct Chain
chain = LLMChain(llm=llm, prompt=chat_prompt, callbacks=[], verbose=True)
return chain
# Fill your openai api key.
os.environ["OPENAI_API_KEY"] = ""
class ChatgptService(chatgpt_pb2_grpc.ChatgptServicer):
def __init__(self, chain):
self.chain = chain
async def Send(self, request: chatgpt_pb2.Message, context: grpc.aio.ServicerContext):
stream_handler = StreamingLLMCallbackHandler(context, chatgpt_pb2)
await self.chain.acall(
{"human_input": request.content},
callbacks=[stream_handler]
)
async def serve() -> None:
"""
Run grpc service
"""
server = grpc.aio.server()
chain = init_chatgpt()
chatgpt_pb2_grpc.add_ChatgptServicer_to_server(ChatgptService(chain), server=server)
server.add_insecure_port(f"[::]:{default_port}")
await server.start()
print(f"Server started, listening on {default_port}")
await server.wait_for_termination()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
# Fill your consul service address and port.
client = register_service("127.0.0.1", 8500, default_port)
try:
asyncio.get_event_loop().run_until_complete(serve())
except KeyboardInterrupt:
client.agent.service.deregister(f"chatgpt-{get_host_ip()}")
print("\nExiting...")
sys.exit()
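# --- Illustrative client sketch added by the editor; not part of the original file. ---
# It assumes the generated stub is named ChatgptStub and that Send is declared as a
# server-streaming RPC (matching the token-streaming callback handler above); both are
# assumptions about the .proto definition, which is not shown here.
async def demo_client() -> None:
    async with grpc.aio.insecure_channel(f"localhost:{default_port}") as channel:
        stub = chatgpt_pb2_grpc.ChatgptStub(channel)
        async for reply in stub.Send(chatgpt_pb2.Message(content="Hello")):
            print(reply)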
| [
"langchain.LLMChain",
"langchain.chat_models.ChatOpenAI",
"langchain.PromptTemplate"
] | [((558, 606), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (571, 606), False, 'import socket\n'), ((875, 938), 'consul.Consul', 'consul.Consul', ([], {'host': 'consul_addr', 'port': 'consul_port', 'verify': '(False)'}), '(host=consul_addr, port=consul_port, verify=False)\n', (888, 938), False, 'import consul\n'), ((1198, 1255), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'verbose': '(True)', 'temperature': '(0.6)'}), '(streaming=True, verbose=True, temperature=0.6)\n', (1208, 1255), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2592, 2658), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['human_input']", 'template': 'template'}), "(input_variables=['human_input'], template=template)\n", (2606, 2658), False, 'from langchain import PromptTemplate, LLMChain\n'), ((2720, 2785), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'chat_prompt', 'callbacks': '[]', 'verbose': '(True)'}), '(llm=llm, prompt=chat_prompt, callbacks=[], verbose=True)\n', (2728, 2785), False, 'from langchain import PromptTemplate, LLMChain\n'), ((3378, 3395), 'grpc.aio.server', 'grpc.aio.server', ([], {}), '()\n', (3393, 3395), False, 'import grpc\n'), ((3731, 3770), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (3750, 3770), False, 'import logging\n'), ((3111, 3160), 'callback.StreamingLLMCallbackHandler', 'StreamingLLMCallbackHandler', (['context', 'chatgpt_pb2'], {}), '(context, chatgpt_pb2)\n', (3138, 3160), False, 'from callback import StreamingLLMCallbackHandler\n'), ((4097, 4107), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4105, 4107), False, 'import sys\n'), ((3904, 3928), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3926, 3928), False, 'import asyncio\n')] |
import langchain_visualizer # isort:skip # noqa: F401
import asyncio
import vcr_langchain as vcr
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
# ========================== Start of langchain example code ==========================
# https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html
llm = OpenAI(temperature=0)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
# ================================== Execute example ==================================
@vcr.use_cassette()
async def llm_chain_demo():
return chain.run("colorful socks")
def test_llm_usage_succeeds():
"""Check that the chain can run normally"""
result = asyncio.get_event_loop().run_until_complete(llm_chain_demo())
assert result.strip() == "Socktastic!"
if __name__ == "__main__":
from langchain_visualizer import visualize
visualize(llm_chain_demo)
| [
"langchain_visualizer.visualize",
"langchain.llms.OpenAI",
"langchain.chains.LLMChain",
"langchain.PromptTemplate"
] | [((387, 408), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (393, 408), False, 'from langchain.llms import OpenAI\n'), ((418, 534), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (432, 534), False, 'from langchain import PromptTemplate\n'), ((550, 582), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (558, 582), False, 'from langchain.chains import LLMChain\n'), ((676, 694), 'vcr_langchain.use_cassette', 'vcr.use_cassette', ([], {}), '()\n', (692, 694), True, 'import vcr_langchain as vcr\n'), ((1042, 1067), 'langchain_visualizer.visualize', 'visualize', (['llm_chain_demo'], {}), '(llm_chain_demo)\n', (1051, 1067), False, 'from langchain_visualizer import visualize\n'), ((856, 880), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (878, 880), False, 'import asyncio\n')] |
"""Test logic on base chain class."""
from typing import Any, Dict, List, Optional
import pytest
from langchain.callbacks.base import CallbackManager
from langchain.chains.base import Chain
from langchain.schema import BaseMemory
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
class FakeMemory(BaseMemory):
"""Fake memory class for testing purposes."""
@property
def memory_variables(self) -> List[str]:
"""Return baz variable."""
return ["baz"]
def load_memory_variables(
self, inputs: Optional[Dict[str, Any]] = None
) -> Dict[str, str]:
"""Return baz variable."""
return {"baz": "foo"}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Pass."""
pass
def clear(self) -> None:
"""Pass."""
pass
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
else:
return {"baz": "bar"}
def test_bad_inputs() -> None:
"""Test errors are raised if input keys are not found."""
chain = FakeChain()
with pytest.raises(ValueError):
chain({"foobar": "baz"})
def test_bad_outputs() -> None:
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(ValueError):
chain({"foo": "baz"})
def test_correct_call() -> None:
"""Test correct call of fake chain."""
chain = FakeChain()
output = chain({"foo": "bar"})
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_correct() -> None:
"""Test passing single input works."""
chain = FakeChain()
output = chain("bar")
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_error() -> None:
"""Test passing single input errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain("bar")
def test_run_single_arg() -> None:
"""Test run method with single arg."""
chain = FakeChain()
output = chain.run("bar")
assert output == "baz"
def test_run_multiple_args_error() -> None:
"""Test run method with multiple args errors as expected."""
chain = FakeChain()
with pytest.raises(ValueError):
chain.run("bar", "foo")
def test_run_kwargs() -> None:
"""Test run method with kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
output = chain.run(foo="bar", bar="foo")
assert output == "baz"
def test_run_kwargs_error() -> None:
"""Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run(foo="bar", baz="foo")
def test_run_args_and_kwargs_error() -> None:
"""Test run method with args and kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar", foo="bar")
def test_multiple_output_keys_error() -> None:
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar")
def test_run_arg_with_memory() -> None:
"""Test run method works when arg is passed."""
chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory())
chain.run("bar")
def test_run_with_callback() -> None:
"""Test run method works when callback manager is passed."""
handler = FakeCallbackHandler()
chain = FakeChain(
callback_manager=CallbackManager(handlers=[handler]), verbose=True
)
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 1
assert handler.ends == 1
assert handler.errors == 0
def test_run_with_callback_not_verbose() -> None:
"""Test run method works when callback manager is passed and not verbose."""
import langchain
langchain.verbose = False
handler = FakeCallbackHandler()
chain = FakeChain(callback_manager=CallbackManager(handlers=[handler]))
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 0
assert handler.ends == 0
assert handler.errors == 0
| [
"langchain.callbacks.base.CallbackManager"
] | [((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')] |
"""Test caching for LLMs and ChatModels."""
from typing import Dict, Generator, List, Union
import pytest
from _pytest.fixtures import FixtureRequest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import langchain
from langchain.cache import (
InMemoryCache,
SQLAlchemyCache,
)
from langchain.chat_models import FakeListChatModel
from langchain.chat_models.base import BaseChatModel, dumps
from langchain.llms import FakeListLLM
from langchain.llms.base import BaseLLM
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
Generation,
HumanMessage,
)
def get_sqlite_cache() -> SQLAlchemyCache:
return SQLAlchemyCache(engine=create_engine("sqlite://"))
CACHE_OPTIONS = [
InMemoryCache,
get_sqlite_cache,
]
@pytest.fixture(autouse=True, params=CACHE_OPTIONS)
def set_cache_and_teardown(request: FixtureRequest) -> Generator[None, None, None]:
# Will be run before each test
cache_instance = request.param
langchain.llm_cache = cache_instance()
if langchain.llm_cache:
langchain.llm_cache.clear()
else:
raise ValueError("Cache not set. This should never happen.")
yield
# Will be run after each test
if langchain.llm_cache:
langchain.llm_cache.clear()
else:
raise ValueError("Cache not set. This should never happen.")
def test_llm_caching() -> None:
prompt = "How are you?"
response = "Test response"
cached_response = "Cached test response"
llm = FakeListLLM(responses=[response])
if langchain.llm_cache:
langchain.llm_cache.update(
prompt=prompt,
llm_string=create_llm_string(llm),
return_val=[Generation(text=cached_response)],
)
assert llm(prompt) == cached_response
else:
raise ValueError(
"The cache not set. This should never happen, as the pytest fixture "
"`set_cache_and_teardown` always sets the cache."
)
def test_old_sqlite_llm_caching() -> None:
if isinstance(langchain.llm_cache, SQLAlchemyCache):
prompt = "How are you?"
response = "Test response"
cached_response = "Cached test response"
llm = FakeListLLM(responses=[response])
items = [
langchain.llm_cache.cache_schema(
prompt=prompt,
llm=create_llm_string(llm),
response=cached_response,
idx=0,
)
]
with Session(langchain.llm_cache.engine) as session, session.begin():
for item in items:
session.merge(item)
assert llm(prompt) == cached_response
def test_chat_model_caching() -> None:
prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
response = "Test response"
cached_response = "Cached test response"
cached_message = AIMessage(content=cached_response)
llm = FakeListChatModel(responses=[response])
if langchain.llm_cache:
langchain.llm_cache.update(
prompt=dumps(prompt),
llm_string=llm._get_llm_string(),
return_val=[ChatGeneration(message=cached_message)],
)
result = llm(prompt)
assert isinstance(result, AIMessage)
assert result.content == cached_response
else:
raise ValueError(
"The cache not set. This should never happen, as the pytest fixture "
"`set_cache_and_teardown` always sets the cache."
)
def test_chat_model_caching_params() -> None:
prompt: List[BaseMessage] = [HumanMessage(content="How are you?")]
response = "Test response"
cached_response = "Cached test response"
cached_message = AIMessage(content=cached_response)
llm = FakeListChatModel(responses=[response])
if langchain.llm_cache:
langchain.llm_cache.update(
prompt=dumps(prompt),
llm_string=llm._get_llm_string(functions=[]),
return_val=[ChatGeneration(message=cached_message)],
)
result = llm(prompt, functions=[])
assert isinstance(result, AIMessage)
assert result.content == cached_response
result_no_params = llm(prompt)
assert isinstance(result_no_params, AIMessage)
assert result_no_params.content == response
else:
raise ValueError(
"The cache not set. This should never happen, as the pytest fixture "
"`set_cache_and_teardown` always sets the cache."
)
def create_llm_string(llm: Union[BaseLLM, BaseChatModel]) -> str:
_dict: Dict = llm.dict()
_dict["stop"] = None
return str(sorted([(k, v) for k, v in _dict.items()]))
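# --- Illustrative sketch added by the editor; not part of the original test module. ---
# The update() calls exercised above have a lookup() counterpart on the same cache
# interface, which is what an LLM consults before issuing a real call; the prompt and
# llm_string values below are placeholders chosen only for demonstration.
def _example_cache_round_trip() -> None:
    cache = InMemoryCache()
    cache.update("How are you?", "fake-llm-string", [Generation(text="cached!")])
    hit = cache.lookup("How are you?", "fake-llm-string")
    assert hit is not None
    assert hit[0].text == "cached!"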
| [
"langchain.schema.Generation",
"langchain.schema.AIMessage",
"langchain.chat_models.base.dumps",
"langchain.schema.ChatGeneration",
"langchain.schema.HumanMessage",
"langchain.llm_cache.clear",
"langchain.chat_models.FakeListChatModel",
"langchain.llms.FakeListLLM"
] | [((796, 846), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'params': 'CACHE_OPTIONS'}), '(autouse=True, params=CACHE_OPTIONS)\n', (810, 846), False, 'import pytest\n'), ((1524, 1557), 'langchain.llms.FakeListLLM', 'FakeListLLM', ([], {'responses': '[response]'}), '(responses=[response])\n', (1535, 1557), False, 'from langchain.llms import FakeListLLM\n'), ((2895, 2929), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'cached_response'}), '(content=cached_response)\n', (2904, 2929), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((2940, 2979), 'langchain.chat_models.FakeListChatModel', 'FakeListChatModel', ([], {'responses': '[response]'}), '(responses=[response])\n', (2957, 2979), False, 'from langchain.chat_models import FakeListChatModel\n'), ((3728, 3762), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'cached_response'}), '(content=cached_response)\n', (3737, 3762), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3773, 3812), 'langchain.chat_models.FakeListChatModel', 'FakeListChatModel', ([], {'responses': '[response]'}), '(responses=[response])\n', (3790, 3812), False, 'from langchain.chat_models import FakeListChatModel\n'), ((1080, 1107), 'langchain.llm_cache.clear', 'langchain.llm_cache.clear', ([], {}), '()\n', (1105, 1107), False, 'import langchain\n'), ((1269, 1296), 'langchain.llm_cache.clear', 'langchain.llm_cache.clear', ([], {}), '()\n', (1294, 1296), False, 'import langchain\n'), ((2233, 2266), 'langchain.llms.FakeListLLM', 'FakeListLLM', ([], {'responses': '[response]'}), '(responses=[response])\n', (2244, 2266), False, 'from langchain.llms import FakeListLLM\n'), ((2760, 2796), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""How are you?"""'}), "(content='How are you?')\n", (2772, 2796), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3593, 3629), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""How are you?"""'}), "(content='How are you?')\n", (3605, 3629), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((702, 728), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite://"""'], {}), "('sqlite://')\n", (715, 728), False, 'from sqlalchemy import create_engine\n'), ((2508, 2543), 'sqlalchemy.orm.Session', 'Session', (['langchain.llm_cache.engine'], {}), '(langchain.llm_cache.engine)\n', (2515, 2543), False, 'from sqlalchemy.orm import Session\n'), ((3063, 3076), 'langchain.chat_models.base.dumps', 'dumps', (['prompt'], {}), '(prompt)\n', (3068, 3076), False, 'from langchain.chat_models.base import BaseChatModel, dumps\n'), ((3896, 3909), 'langchain.chat_models.base.dumps', 'dumps', (['prompt'], {}), '(prompt)\n', (3901, 3909), False, 'from langchain.chat_models.base import BaseChatModel, dumps\n'), ((1720, 1752), 'langchain.schema.Generation', 'Generation', ([], {'text': 'cached_response'}), '(text=cached_response)\n', (1730, 1752), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3148, 3186), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'cached_message'}), '(message=cached_message)\n', (3162, 3186), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n'), ((3993, 4031), 'langchain.schema.ChatGeneration', 
'ChatGeneration', ([], {'message': 'cached_message'}), '(message=cached_message)\n', (4007, 4031), False, 'from langchain.schema import AIMessage, BaseMessage, ChatGeneration, Generation, HumanMessage\n')] |
import json
import pytest
from langchain.prompts import ChatPromptTemplate
from langchain.schema.exceptions import LangChainException
from langchain.schema.messages import HumanMessage
from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError
pytest_plugins = ("pytest_asyncio",)
def test_bedrock_caller_load_settings(mocker, mock_settings):
mocked_boto3_client = mocker.patch(
"llm_api.backends.bedrock.BedrockCaller.get_boto3_client"
)
mocked_bedrock_client = mocker.patch(
"llm_api.backends.bedrock.BedrockCaller.get_client"
)
caller = BedrockCaller(mock_settings)
expected_test_key = mock_settings.aws_secret_access_key.get_secret_value()
assert caller.settings.aws_secret_access_key.get_secret_value() == expected_test_key
mocked_boto3_client.assert_called_once()
mocked_bedrock_client.assert_called_once()
def test_generate_prompt_success():
user_input = "What day is it today?"
prompt_output = BedrockCaller.generate_prompt()
assert isinstance(prompt_output, ChatPromptTemplate)
prompt_output = prompt_output.format_messages(text=user_input)
expected_prompt_elements = 5
assert len(prompt_output) == expected_prompt_elements
assert isinstance(prompt_output[-1], HumanMessage)
assert prompt_output[-1].content == user_input
@pytest.mark.asyncio
async def test_call_model_success(mocker, mock_settings):
caller = BedrockCaller(mock_settings)
expected_entities = ["William Shakespeare", "Globe Theatre"]
mocked_result = {
"entities": [
{
"uri": "William Shakespeare",
"description": "English playwright, poet, and actor",
"wikipedia_url": "https://en.wikipedia.org/wiki/William_Shakespeare",
},
{
"uri": "Globe Theatre",
"description": "Theatre in London associated with William Shakespeare",
"wikipedia_url": "https://en.wikipedia.org/wiki/Globe_Theatre",
},
],
"connections": [
{
"from": "William Shakespeare",
"to": "Globe Theatre",
"label": "performed plays at",
},
],
"user_search": "Who is Shakespeare?",
}
mocked_result = "Test json ```json" + json.dumps(mocked_result) + "```"
mocker.patch(
"langchain.schema.runnable.base.RunnableSequence.ainvoke",
return_value=mocked_result,
)
user_template = "{text}"
test_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a test system"),
("system", "Provide a valid JSON response to the user."),
("user", user_template),
]
)
test_search = "Who is Shakespeare?"
response = await caller.call_model(test_prompt, test_search)
assert expected_entities == [entity["uri"] for entity in response["entities"]]
@pytest.mark.asyncio
async def test_call_model_failure_index_error(mocker, mock_settings):
caller = BedrockCaller(mock_settings)
mocked_result = {
"entities": [
{
"uri": "William Shakespeare",
"description": "English playwright, poet, and actor",
"wikipedia_url": "https://en.wikipedia.org/wiki/William_Shakespeare",
},
{
"uri": "Globe Theatre",
"description": "Theatre in London associated with William Shakespeare",
"wikipedia_url": "https://en.wikipedia.org/wiki/Globe_Theatre",
},
],
"connections": [
{
"from": "William Shakespeare",
"to": "Globe Theatre",
"label": "performed plays at",
},
],
"user_search": "Who is Shakespeare?",
}
mocked_result = "Test json" + json.dumps(mocked_result)
mocker.patch(
"langchain.schema.runnable.base.RunnableSequence.ainvoke",
return_value=mocked_result,
)
expected_error_message = "Unable to parse model output as expected."
user_template = "{text}"
test_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a test system"),
("system", "Provide a valid JSON response to the user."),
("user", user_template),
]
)
test_search = "Who is Shakespeare?"
with pytest.raises(BedrockModelCallError) as exception:
await caller.call_model(test_prompt, test_search)
assert expected_error_message in str(exception.value)
@pytest.mark.asyncio
async def test_call_model_failure_json_decode_error(mocker, mock_settings):
caller = BedrockCaller(mock_settings)
mocked_result = {
"entities": [
{
"uri": "William Shakespeare",
"description": "English playwright, poet, and actor",
"wikipedia_url": "https://en.wikipedia.org/wiki/William_Shakespeare",
},
{
"uri": "Globe Theatre",
"description": "Theatre in London associated with William Shakespeare",
"wikipedia_url": "https://en.wikipedia.org/wiki/Globe_Theatre",
},
],
"connections": [
{
"from": "William Shakespeare",
"to": "Globe Theatre",
"label": "performed plays at",
},
],
"user_search": "Who is Shakespeare?",
}
mocked_result = "Test ```json," + json.dumps(mocked_result) + "```"
mocker.patch(
"langchain.schema.runnable.base.RunnableSequence.ainvoke",
return_value=mocked_result,
)
expected_error_message = "Error decoding model output."
user_template = "{text}"
test_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a test system"),
("system", "Provide a valid JSON response to the user."),
("user", user_template),
]
)
test_search = "Who is Shakespeare?"
with pytest.raises(BedrockModelCallError) as exception:
await caller.call_model(test_prompt, test_search)
assert expected_error_message in str(exception.value)
@pytest.mark.asyncio
async def test_call_model_failure_api_error(mocker, mock_settings):
caller = BedrockCaller(mock_settings)
mocked_client_call = mocker.patch(
"langchain.schema.runnable.base.RunnableSequence.ainvoke"
)
expected_error_message = "Error calling model."
mocked_client_call.side_effect = ValueError()
user_template = "{text}"
test_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a test system"),
("system", "Provide a valid JSON response to the user."),
("user", user_template),
]
)
test_search = "Who is Shakespeare?"
with pytest.raises(BedrockModelCallError) as exception:
await caller.call_model(test_prompt, test_search)
assert expected_error_message in str(exception.value)
@pytest.mark.asyncio
async def test_call_model_failure_langchain_error(mocker, mock_settings):
caller = BedrockCaller(mock_settings)
mocked_client_call = mocker.patch(
"langchain.schema.runnable.base.RunnableSequence.ainvoke"
)
expected_error_message = "Error sending prompt to LLM."
mocked_client_call.side_effect = LangChainException()
user_template = "{text}"
test_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a test system"),
("system", "Provide a valid JSON response to the user."),
("user", user_template),
]
)
test_search = "Who is Shakespeare?"
with pytest.raises(BedrockModelCallError) as exception:
await caller.call_model(test_prompt, test_search)
assert expected_error_message in str(exception.value)
| [
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.schema.exceptions.LangChainException"
] | [((597, 625), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (610, 625), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((994, 1025), 'llm_api.backends.bedrock.BedrockCaller.generate_prompt', 'BedrockCaller.generate_prompt', ([], {}), '()\n', (1023, 1025), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((1443, 1471), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (1456, 1471), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((2561, 2724), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (2593, 2724), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((3073, 3101), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (3086, 3101), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((4183, 4346), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (4215, 4346), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((4728, 4756), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (4741, 4756), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((5837, 6000), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (5869, 6000), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((6374, 6402), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (6387, 6402), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((6666, 6829), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (6698, 6829), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((7210, 7238), 'llm_api.backends.bedrock.BedrockCaller', 'BedrockCaller', (['mock_settings'], {}), '(mock_settings)\n', (7223, 7238), False, 'from llm_api.backends.bedrock import BedrockCaller, BedrockModelCallError\n'), ((7449, 7469), 'langchain.schema.exceptions.LangChainException', 'LangChainException', ([], {}), '()\n', (7467, 7469), False, 'from langchain.schema.exceptions import LangChainException\n'), ((7518, 7681), 
'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (["[('system', 'You are a test system'), ('system',\n 'Provide a valid JSON response to the user.'), ('user', user_template)]"], {}), "([('system', 'You are a test system'), (\n 'system', 'Provide a valid JSON response to the user.'), ('user',\n user_template)])\n", (7550, 7681), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((3909, 3934), 'json.dumps', 'json.dumps', (['mocked_result'], {}), '(mocked_result)\n', (3919, 3934), False, 'import json\n'), ((4449, 4485), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (4462, 4485), False, 'import pytest\n'), ((6103, 6139), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (6116, 6139), False, 'import pytest\n'), ((6932, 6968), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (6945, 6968), False, 'import pytest\n'), ((7784, 7820), 'pytest.raises', 'pytest.raises', (['BedrockModelCallError'], {}), '(BedrockModelCallError)\n', (7797, 7820), False, 'import pytest\n'), ((2353, 2378), 'json.dumps', 'json.dumps', (['mocked_result'], {}), '(mocked_result)\n', (2363, 2378), False, 'import json\n'), ((5568, 5593), 'json.dumps', 'json.dumps', (['mocked_result'], {}), '(mocked_result)\n', (5578, 5593), False, 'import json\n')] |
"""Test Tracer classes."""
from __future__ import annotations
import json
from datetime import datetime
from typing import Tuple
from unittest.mock import patch
from uuid import UUID, uuid4
import pytest
from freezegun import freeze_time
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.schemas import Run, RunTypeEnum, TracerSession
from langchain.schema import LLMResult
_SESSION_ID = UUID("4fbf7c55-2727-4711-8964-d821ed4d4e2a")
_TENANT_ID = UUID("57a08cc4-73d2-4236-8378-549099d07fad")
@pytest.fixture
def lang_chain_tracer_v2(monkeypatch: pytest.MonkeyPatch) -> LangChainTracer:
monkeypatch.setenv("LANGCHAIN_TENANT_ID", "test-tenant-id")
monkeypatch.setenv("LANGCHAIN_ENDPOINT", "http://test-endpoint.com")
monkeypatch.setenv("LANGCHAIN_API_KEY", "foo")
tracer = LangChainTracer()
return tracer
# Mock a sample TracerSession object
@pytest.fixture
def sample_tracer_session_v2() -> TracerSession:
return TracerSession(id=_SESSION_ID, name="Sample session", tenant_id=_TENANT_ID)
@freeze_time("2023-01-01")
@pytest.fixture
def sample_runs() -> Tuple[Run, Run, Run]:
llm_run = Run(
id="57a08cc4-73d2-4236-8370-549099d07fad",
name="llm_run",
execution_order=1,
child_execution_order=1,
parent_run_id="57a08cc4-73d2-4236-8371-549099d07fad",
start_time=datetime.utcnow(),
end_time=datetime.utcnow(),
session_id=1,
inputs={"prompts": []},
outputs=LLMResult(generations=[[]]).dict(),
serialized={},
extra={},
run_type=RunTypeEnum.llm,
)
chain_run = Run(
id="57a08cc4-73d2-4236-8371-549099d07fad",
name="chain_run",
execution_order=1,
start_time=datetime.utcnow(),
end_time=datetime.utcnow(),
child_execution_order=1,
serialized={},
inputs={},
outputs={},
child_runs=[llm_run],
extra={},
run_type=RunTypeEnum.chain,
)
tool_run = Run(
id="57a08cc4-73d2-4236-8372-549099d07fad",
name="tool_run",
execution_order=1,
child_execution_order=1,
inputs={"input": "test"},
start_time=datetime.utcnow(),
end_time=datetime.utcnow(),
outputs=None,
serialized={},
child_runs=[],
extra={},
run_type=RunTypeEnum.tool,
)
return llm_run, chain_run, tool_run
def test_persist_run(
lang_chain_tracer_v2: LangChainTracer,
sample_tracer_session_v2: TracerSession,
sample_runs: Tuple[Run, Run, Run],
) -> None:
"""Test that persist_run method calls requests.post once per method call."""
with patch("langchain.callbacks.tracers.langchain.requests.post") as post, patch(
"langchain.callbacks.tracers.langchain.requests.get"
) as get:
post.return_value.raise_for_status.return_value = None
lang_chain_tracer_v2.session = sample_tracer_session_v2
for run in sample_runs:
lang_chain_tracer_v2.run_map[str(run.id)] = run
for run in sample_runs:
lang_chain_tracer_v2._end_trace(run)
assert post.call_count == 3
assert get.call_count == 0
def test_persist_run_with_example_id(
lang_chain_tracer_v2: LangChainTracer,
sample_tracer_session_v2: TracerSession,
sample_runs: Tuple[Run, Run, Run],
) -> None:
"""Test the example ID is assigned only to the parent run and not the children."""
example_id = uuid4()
llm_run, chain_run, tool_run = sample_runs
chain_run.child_runs = [tool_run]
tool_run.child_runs = [llm_run]
with patch("langchain.callbacks.tracers.langchain.requests.post") as post, patch(
"langchain.callbacks.tracers.langchain.requests.get"
) as get:
post.return_value.raise_for_status.return_value = None
lang_chain_tracer_v2.session = sample_tracer_session_v2
lang_chain_tracer_v2.example_id = example_id
lang_chain_tracer_v2._persist_run(chain_run)
assert post.call_count == 3
assert get.call_count == 0
posted_data = [
json.loads(call_args[1]["data"]) for call_args in post.call_args_list
]
assert posted_data[0]["id"] == str(chain_run.id)
assert posted_data[0]["reference_example_id"] == str(example_id)
assert posted_data[1]["id"] == str(tool_run.id)
assert not posted_data[1].get("reference_example_id")
assert posted_data[2]["id"] == str(llm_run.id)
assert not posted_data[2].get("reference_example_id")
| [
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.schema.LLMResult",
"langchain.callbacks.tracers.schemas.TracerSession"
] | [((441, 485), 'uuid.UUID', 'UUID', (['"""4fbf7c55-2727-4711-8964-d821ed4d4e2a"""'], {}), "('4fbf7c55-2727-4711-8964-d821ed4d4e2a')\n", (445, 485), False, 'from uuid import UUID, uuid4\n'), ((499, 543), 'uuid.UUID', 'UUID', (['"""57a08cc4-73d2-4236-8378-549099d07fad"""'], {}), "('57a08cc4-73d2-4236-8378-549099d07fad')\n", (503, 543), False, 'from uuid import UUID, uuid4\n'), ((1070, 1095), 'freezegun.freeze_time', 'freeze_time', (['"""2023-01-01"""'], {}), "('2023-01-01')\n", (1081, 1095), False, 'from freezegun import freeze_time\n'), ((841, 858), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (856, 858), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((992, 1066), 'langchain.callbacks.tracers.schemas.TracerSession', 'TracerSession', ([], {'id': '_SESSION_ID', 'name': '"""Sample session"""', 'tenant_id': '_TENANT_ID'}), "(id=_SESSION_ID, name='Sample session', tenant_id=_TENANT_ID)\n", (1005, 1066), False, 'from langchain.callbacks.tracers.schemas import Run, RunTypeEnum, TracerSession\n'), ((3506, 3513), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3511, 3513), False, 'from uuid import UUID, uuid4\n'), ((2700, 2760), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.post"""'], {}), "('langchain.callbacks.tracers.langchain.requests.post')\n", (2705, 2760), False, 'from unittest.mock import patch\n'), ((2770, 2829), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.get"""'], {}), "('langchain.callbacks.tracers.langchain.requests.get')\n", (2775, 2829), False, 'from unittest.mock import patch\n'), ((3644, 3704), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.post"""'], {}), "('langchain.callbacks.tracers.langchain.requests.post')\n", (3649, 3704), False, 'from unittest.mock import patch\n'), ((3714, 3773), 'unittest.mock.patch', 'patch', (['"""langchain.callbacks.tracers.langchain.requests.get"""'], {}), "('langchain.callbacks.tracers.langchain.requests.get')\n", (3719, 3773), False, 'from unittest.mock import patch\n'), ((1390, 1407), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1405, 1407), False, 'from datetime import datetime\n'), ((1426, 1443), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1441, 1443), False, 'from datetime import datetime\n'), ((1776, 1793), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1791, 1793), False, 'from datetime import datetime\n'), ((1812, 1829), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1827, 1829), False, 'from datetime import datetime\n'), ((2226, 2243), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2241, 2243), False, 'from datetime import datetime\n'), ((2262, 2279), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2277, 2279), False, 'from datetime import datetime\n'), ((4137, 4169), 'json.loads', 'json.loads', (["call_args[1]['data']"], {}), "(call_args[1]['data'])\n", (4147, 4169), False, 'import json\n'), ((1515, 1542), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[[]]'}), '(generations=[[]])\n', (1524, 1542), False, 'from langchain.schema import LLMResult\n')] |
import langchain_visualizer # isort:skip # noqa: F401
import asyncio
from typing import Any, Dict, List, Optional
import vcr_langchain as vcr
from langchain import PromptTemplate
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.llms import OpenAI
# ========================== Start of langchain example code ==========================
# https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html
class ConcatenateChain(Chain):
chain_1: LLMChain
chain_2: LLMChain
@property
def input_keys(self) -> List[str]:
# Union of the input keys of the two chains.
all_input_vars = set(self.chain_1.input_keys).union(
set(self.chain_2.input_keys)
)
return list(all_input_vars)
@property
def output_keys(self) -> List[str]:
return ["concat_output"]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
output_1 = self.chain_1.run(inputs)
output_2 = self.chain_2.run(inputs)
return {"concat_output": output_1 + output_2}
llm = OpenAI()
prompt_1 = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain_1 = LLMChain(llm=llm, prompt=prompt_1)
prompt_2 = PromptTemplate(
input_variables=["product"],
template="What is a good slogan for a company that makes {product}?",
)
chain_2 = LLMChain(llm=llm, prompt=prompt_2)
concat_chain = ConcatenateChain(chain_1=chain_1, chain_2=chain_2)
chain = concat_chain
# ================================== Execute example ==================================
@vcr.use_cassette()
async def custom_chain_demo():
return chain.run("colorful socks")
def test_llm_usage_succeeds():
"""Check that the chain can run normally"""
result = asyncio.get_event_loop().run_until_complete(custom_chain_demo())
assert (
result.strip()
== 'Sock Spectacular.\n\n"Step Up Your Style with Colorful Socks!"'
)
if __name__ == "__main__":
from langchain_visualizer import visualize
visualize(custom_chain_demo)
| [
"langchain_visualizer.visualize",
"langchain.llms.OpenAI",
"langchain.chains.LLMChain",
"langchain.PromptTemplate"
] | [((1254, 1262), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1260, 1262), False, 'from langchain.llms import OpenAI\n'), ((1275, 1391), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (1289, 1391), False, 'from langchain import PromptTemplate\n'), ((1408, 1442), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_1'}), '(llm=llm, prompt=prompt_1)\n', (1416, 1442), False, 'from langchain.chains import LLMChain\n'), ((1455, 1573), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good slogan for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good slogan for a company that makes {product}?')\n", (1469, 1573), False, 'from langchain import PromptTemplate\n'), ((1590, 1624), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_2'}), '(llm=llm, prompt=prompt_2)\n', (1598, 1624), False, 'from langchain.chains import LLMChain\n'), ((1806, 1824), 'vcr_langchain.use_cassette', 'vcr.use_cassette', ([], {}), '()\n', (1822, 1824), True, 'import vcr_langchain as vcr\n'), ((2253, 2281), 'langchain_visualizer.visualize', 'visualize', (['custom_chain_demo'], {}), '(custom_chain_demo)\n', (2262, 2281), False, 'from langchain_visualizer import visualize\n'), ((1989, 2013), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2011, 2013), False, 'import asyncio\n')] |
"""A tracer that runs evaluators over completed runs."""
from __future__ import annotations
import logging
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Any, Dict, List, Optional, Sequence, Set, Union
from uuid import UUID
import langsmith
from langsmith.evaluation.evaluator import EvaluationResult
from langchain.callbacks import manager
from langchain.callbacks.tracers import langchain as langchain_tracer
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run
logger = logging.getLogger(__name__)
class EvaluatorCallbackHandler(BaseTracer):
"""A tracer that runs a run evaluator whenever a run is persisted.
Parameters
----------
evaluators : Sequence[RunEvaluator]
The run evaluators to apply to all top level runs.
max_workers : int, optional
The maximum number of worker threads to use for running the evaluators.
If not specified, it will default to the number of evaluators.
client : LangSmith Client, optional
The LangSmith client instance to use for evaluating the runs.
If not specified, a new instance will be created.
example_id : Union[UUID, str], optional
The example ID to be associated with the runs.
project_name : str, optional
        The LangSmith project name to organize eval chain runs under.
Attributes
----------
example_id : Union[UUID, None]
The example ID associated with the runs.
client : Client
The LangSmith client instance used for evaluating the runs.
evaluators : Sequence[RunEvaluator]
The sequence of run evaluators to be executed.
executor : ThreadPoolExecutor
The thread pool executor used for running the evaluators.
futures : Set[Future]
The set of futures representing the running evaluators.
skip_unfinished : bool
        Whether to skip runs that are not finished or that raised
        an error.
project_name : Optional[str]
        The LangSmith project name to organize eval chain runs under.
"""
name = "evaluator_callback_handler"
def __init__(
self,
evaluators: Sequence[langsmith.RunEvaluator],
max_workers: Optional[int] = None,
client: Optional[langsmith.Client] = None,
example_id: Optional[Union[UUID, str]] = None,
skip_unfinished: bool = True,
project_name: Optional[str] = "evaluators",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.example_id = (
UUID(example_id) if isinstance(example_id, str) else example_id
)
self.client = client or langchain_tracer.get_client()
self.evaluators = evaluators
self.max_workers = max_workers or len(evaluators)
self.futures: Set[Future] = set()
self.skip_unfinished = skip_unfinished
self.project_name = project_name
self.logged_eval_results: Dict[str, List[EvaluationResult]] = {}
def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None:
"""Evaluate the run in the project.
Parameters
----------
run : Run
The run to be evaluated.
evaluator : RunEvaluator
The evaluator to use for evaluating the run.
"""
try:
if self.project_name is None:
eval_result = self.client.evaluate_run(run, evaluator)
            else:
                with manager.tracing_v2_enabled(
                    project_name=self.project_name, tags=["eval"], client=self.client
                ):
                    eval_result = self.client.evaluate_run(run, evaluator)
except Exception as e:
logger.error(
f"Error evaluating run {run.id} with "
f"{evaluator.__class__.__name__}: {e}",
exc_info=True,
)
raise e
example_id = str(run.reference_example_id)
self.logged_eval_results.setdefault(example_id, []).append(eval_result)
def _persist_run(self, run: Run) -> None:
"""Run the evaluator on the run.
Parameters
----------
run : Run
The run to be evaluated.
"""
if self.skip_unfinished and not run.outputs:
logger.debug(f"Skipping unfinished run {run.id}")
return
run_ = run.copy()
run_.reference_example_id = self.example_id
if self.max_workers > 0:
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
list(
executor.map(
self._evaluate_in_project,
[run_ for _ in range(len(self.evaluators))],
self.evaluators,
)
)
else:
for evaluator in self.evaluators:
self._evaluate_in_project(run_, evaluator)
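# Minimal usage sketch (added for illustration; not part of the original module).
# The handler is attached like any other callback, and evaluation feedback is
# collected in `logged_eval_results`. The names `my_run_evaluator` and `chain`
# below are assumptions:
#
#   handler = EvaluatorCallbackHandler(evaluators=[my_run_evaluator])
#   chain.run("some input", callbacks=[handler])
#   print(handler.logged_eval_results)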
| [
"langchain.callbacks.tracers.langchain.get_client",
"langchain.callbacks.manager.tracing_v2_enabled"
] | [((562, 589), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'import logging\n'), ((2581, 2597), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2585, 2597), False, 'from uuid import UUID\n'), ((2687, 2716), 'langchain.callbacks.tracers.langchain.get_client', 'langchain_tracer.get_client', ([], {}), '()\n', (2714, 2716), True, 'from langchain.callbacks.tracers import langchain as langchain_tracer\n'), ((3489, 3586), 'langchain.callbacks.manager.tracing_v2_enabled', 'manager.tracing_v2_enabled', ([], {'project_name': 'self.project_name', 'tags': "['eval']", 'client': 'self.client'}), "(project_name=self.project_name, tags=['eval'],\n client=self.client)\n", (3515, 3586), False, 'from langchain.callbacks import manager\n'), ((4506, 4554), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4524, 4554), False, 'from concurrent.futures import Future, ThreadPoolExecutor\n')] |
"""A tracer that runs evaluators over completed runs."""
from __future__ import annotations
import logging
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Any, Dict, List, Optional, Sequence, Set, Union
from uuid import UUID
import langsmith
from langsmith.evaluation.evaluator import EvaluationResult
from langchain.callbacks import manager
from langchain.callbacks.tracers import langchain as langchain_tracer
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run
logger = logging.getLogger(__name__)
class EvaluatorCallbackHandler(BaseTracer):
"""A tracer that runs a run evaluator whenever a run is persisted.
Parameters
----------
evaluators : Sequence[RunEvaluator]
The run evaluators to apply to all top level runs.
max_workers : int, optional
The maximum number of worker threads to use for running the evaluators.
If not specified, it will default to the number of evaluators.
client : LangSmith Client, optional
The LangSmith client instance to use for evaluating the runs.
If not specified, a new instance will be created.
example_id : Union[UUID, str], optional
The example ID to be associated with the runs.
project_name : str, optional
The LangSmith project name to be organize eval chain runs under.
Attributes
----------
example_id : Union[UUID, None]
The example ID associated with the runs.
client : Client
The LangSmith client instance used for evaluating the runs.
evaluators : Sequence[RunEvaluator]
The sequence of run evaluators to be executed.
executor : ThreadPoolExecutor
The thread pool executor used for running the evaluators.
futures : Set[Future]
The set of futures representing the running evaluators.
skip_unfinished : bool
Whether to skip runs that are not finished or raised
an error.
project_name : Optional[str]
The LangSmith project name to be organize eval chain runs under.
"""
name = "evaluator_callback_handler"
def __init__(
self,
evaluators: Sequence[langsmith.RunEvaluator],
max_workers: Optional[int] = None,
client: Optional[langsmith.Client] = None,
example_id: Optional[Union[UUID, str]] = None,
skip_unfinished: bool = True,
project_name: Optional[str] = "evaluators",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.example_id = (
UUID(example_id) if isinstance(example_id, str) else example_id
)
self.client = client or langchain_tracer.get_client()
self.evaluators = evaluators
self.max_workers = max_workers or len(evaluators)
self.futures: Set[Future] = set()
self.skip_unfinished = skip_unfinished
self.project_name = project_name
self.logged_eval_results: Dict[str, List[EvaluationResult]] = {}
def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None:
"""Evaluate the run in the project.
Parameters
----------
run : Run
The run to be evaluated.
evaluator : RunEvaluator
The evaluator to use for evaluating the run.
"""
try:
if self.project_name is None:
eval_result = self.client.evaluate_run(run, evaluator)
with manager.tracing_v2_enabled(
project_name=self.project_name, tags=["eval"], client=self.client
):
eval_result = self.client.evaluate_run(run, evaluator)
except Exception as e:
logger.error(
f"Error evaluating run {run.id} with "
f"{evaluator.__class__.__name__}: {e}",
exc_info=True,
)
raise e
example_id = str(run.reference_example_id)
self.logged_eval_results.setdefault(example_id, []).append(eval_result)
def _persist_run(self, run: Run) -> None:
"""Run the evaluator on the run.
Parameters
----------
run : Run
The run to be evaluated.
"""
if self.skip_unfinished and not run.outputs:
logger.debug(f"Skipping unfinished run {run.id}")
return
run_ = run.copy()
run_.reference_example_id = self.example_id
if self.max_workers > 0:
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
list(
executor.map(
self._evaluate_in_project,
[run_ for _ in range(len(self.evaluators))],
self.evaluators,
)
)
else:
for evaluator in self.evaluators:
self._evaluate_in_project(run_, evaluator)
| [
"langchain.callbacks.tracers.langchain.get_client",
"langchain.callbacks.manager.tracing_v2_enabled"
] | [((562, 589), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'import logging\n'), ((2581, 2597), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2585, 2597), False, 'from uuid import UUID\n'), ((2687, 2716), 'langchain.callbacks.tracers.langchain.get_client', 'langchain_tracer.get_client', ([], {}), '()\n', (2714, 2716), True, 'from langchain.callbacks.tracers import langchain as langchain_tracer\n'), ((3489, 3586), 'langchain.callbacks.manager.tracing_v2_enabled', 'manager.tracing_v2_enabled', ([], {'project_name': 'self.project_name', 'tags': "['eval']", 'client': 'self.client'}), "(project_name=self.project_name, tags=['eval'],\n client=self.client)\n", (3515, 3586), False, 'from langchain.callbacks import manager\n'), ((4506, 4554), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4524, 4554), False, 'from concurrent.futures import Future, ThreadPoolExecutor\n')] |
"""A tracer that runs evaluators over completed runs."""
from __future__ import annotations
import logging
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Any, Dict, List, Optional, Sequence, Set, Union
from uuid import UUID
import langsmith
from langsmith.evaluation.evaluator import EvaluationResult
from langchain.callbacks import manager
from langchain.callbacks.tracers import langchain as langchain_tracer
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.schemas import Run
logger = logging.getLogger(__name__)
class EvaluatorCallbackHandler(BaseTracer):
"""A tracer that runs a run evaluator whenever a run is persisted.
Parameters
----------
evaluators : Sequence[RunEvaluator]
The run evaluators to apply to all top level runs.
max_workers : int, optional
The maximum number of worker threads to use for running the evaluators.
If not specified, it will default to the number of evaluators.
client : LangSmith Client, optional
The LangSmith client instance to use for evaluating the runs.
If not specified, a new instance will be created.
example_id : Union[UUID, str], optional
The example ID to be associated with the runs.
project_name : str, optional
The LangSmith project name to be organize eval chain runs under.
Attributes
----------
example_id : Union[UUID, None]
The example ID associated with the runs.
client : Client
The LangSmith client instance used for evaluating the runs.
evaluators : Sequence[RunEvaluator]
The sequence of run evaluators to be executed.
executor : ThreadPoolExecutor
The thread pool executor used for running the evaluators.
futures : Set[Future]
The set of futures representing the running evaluators.
skip_unfinished : bool
Whether to skip runs that are not finished or raised
an error.
project_name : Optional[str]
The LangSmith project name to be organize eval chain runs under.
"""
name = "evaluator_callback_handler"
def __init__(
self,
evaluators: Sequence[langsmith.RunEvaluator],
max_workers: Optional[int] = None,
client: Optional[langsmith.Client] = None,
example_id: Optional[Union[UUID, str]] = None,
skip_unfinished: bool = True,
project_name: Optional[str] = "evaluators",
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.example_id = (
UUID(example_id) if isinstance(example_id, str) else example_id
)
self.client = client or langchain_tracer.get_client()
self.evaluators = evaluators
self.max_workers = max_workers or len(evaluators)
self.futures: Set[Future] = set()
self.skip_unfinished = skip_unfinished
self.project_name = project_name
self.logged_eval_results: Dict[str, List[EvaluationResult]] = {}
def _evaluate_in_project(self, run: Run, evaluator: langsmith.RunEvaluator) -> None:
"""Evaluate the run in the project.
Parameters
----------
run : Run
The run to be evaluated.
evaluator : RunEvaluator
The evaluator to use for evaluating the run.
"""
try:
if self.project_name is None:
eval_result = self.client.evaluate_run(run, evaluator)
with manager.tracing_v2_enabled(
project_name=self.project_name, tags=["eval"], client=self.client
):
eval_result = self.client.evaluate_run(run, evaluator)
except Exception as e:
logger.error(
f"Error evaluating run {run.id} with "
f"{evaluator.__class__.__name__}: {e}",
exc_info=True,
)
raise e
example_id = str(run.reference_example_id)
self.logged_eval_results.setdefault(example_id, []).append(eval_result)
def _persist_run(self, run: Run) -> None:
"""Run the evaluator on the run.
Parameters
----------
run : Run
The run to be evaluated.
"""
if self.skip_unfinished and not run.outputs:
logger.debug(f"Skipping unfinished run {run.id}")
return
run_ = run.copy()
run_.reference_example_id = self.example_id
if self.max_workers > 0:
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
list(
executor.map(
self._evaluate_in_project,
[run_ for _ in range(len(self.evaluators))],
self.evaluators,
)
)
else:
for evaluator in self.evaluators:
self._evaluate_in_project(run_, evaluator)
| [
"langchain.callbacks.tracers.langchain.get_client",
"langchain.callbacks.manager.tracing_v2_enabled"
] | [((562, 589), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'import logging\n'), ((2581, 2597), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2585, 2597), False, 'from uuid import UUID\n'), ((2687, 2716), 'langchain.callbacks.tracers.langchain.get_client', 'langchain_tracer.get_client', ([], {}), '()\n', (2714, 2716), True, 'from langchain.callbacks.tracers import langchain as langchain_tracer\n'), ((3489, 3586), 'langchain.callbacks.manager.tracing_v2_enabled', 'manager.tracing_v2_enabled', ([], {'project_name': 'self.project_name', 'tags': "['eval']", 'client': 'self.client'}), "(project_name=self.project_name, tags=['eval'],\n client=self.client)\n", (3515, 3586), False, 'from langchain.callbacks import manager\n'), ((4506, 4554), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'self.max_workers'}), '(max_workers=self.max_workers)\n', (4524, 4554), False, 'from concurrent.futures import Future, ThreadPoolExecutor\n')] |
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.utils import get_from_env
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
api_url = api_url or get_from_env("api_url", "LANGCHAIN_HUB_API_URL")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_HUB_API_KEY", default="")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_API_KEY")
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
) -> str:
"""
    Pushes an object to the hub and returns the new commit hash.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
resp = client.push(
repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash
)
commit_hash: str = resp["commit"]["commit_hash"]
return commit_hash
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
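# Usage sketch (illustrative only; the repo handle is a placeholder and one of the
# API-key environment variables read above must be set):
#
#   commit_hash = push("my-handle/my-prompt", prompt_object)
#   prompt_object = pull("my-handle/my-prompt")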
| [
"langchainhub.Client",
"langchain.utils.get_from_env",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((862, 894), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (868, 894), False, 'from langchainhub import Client\n'), ((1234, 1247), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1239, 1247), False, 'from langchain.load.dump import dumps\n'), ((1740, 1751), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (1745, 1751), False, 'from langchain.load.load import loads\n'), ((646, 694), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_url"""', '"""LANGCHAIN_HUB_API_URL"""'], {}), "('api_url', 'LANGCHAIN_HUB_API_URL')\n", (658, 694), False, 'from langchain.utils import get_from_env\n'), ((720, 780), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_HUB_API_KEY"""'], {'default': '""""""'}), "('api_key', 'LANGCHAIN_HUB_API_KEY', default='')\n", (732, 780), False, 'from langchain.utils import get_from_env\n'), ((806, 850), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_API_KEY"""'], {}), "('api_key', 'LANGCHAIN_API_KEY')\n", (818, 850), False, 'from langchain.utils import get_from_env\n')] |
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.utils import get_from_env
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
api_url = api_url or get_from_env("api_url", "LANGCHAIN_HUB_API_URL")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_HUB_API_KEY", default="")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_API_KEY")
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
) -> str:
"""
Pushes an object to the hub and returns the URL.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
resp = client.push(
repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash
)
commit_hash: str = resp["commit"]["commit_hash"]
return commit_hash
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
| [
"langchainhub.Client",
"langchain.utils.get_from_env",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((862, 894), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (868, 894), False, 'from langchainhub import Client\n'), ((1234, 1247), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1239, 1247), False, 'from langchain.load.dump import dumps\n'), ((1740, 1751), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (1745, 1751), False, 'from langchain.load.load import loads\n'), ((646, 694), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_url"""', '"""LANGCHAIN_HUB_API_URL"""'], {}), "('api_url', 'LANGCHAIN_HUB_API_URL')\n", (658, 694), False, 'from langchain.utils import get_from_env\n'), ((720, 780), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_HUB_API_KEY"""'], {'default': '""""""'}), "('api_key', 'LANGCHAIN_HUB_API_KEY', default='')\n", (732, 780), False, 'from langchain.utils import get_from_env\n'), ((806, 850), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_API_KEY"""'], {}), "('api_key', 'LANGCHAIN_API_KEY')\n", (818, 850), False, 'from langchain.utils import get_from_env\n')] |
import os
import utils
import traceback
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory,ConversationBufferMemory,ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from embeddings import EmbeddingsManager
from flask import Flask, send_from_directory
import json
import time
import threading
import secrets
import string
import hashlib
from flask import request
from langchain.cache import InMemoryCache,SQLiteCache
import re
import requests
from waitress import serve
from translator import Translator
import sys
from query.discoursequery import DiscourseQuery
from query.embeddingsquery import EmbeddingsQuery
from Summary import Summary
import uuid
from langchain.llms import NLPCloud
from langchain.llms import AI21
from langchain.llms import Cohere
from SmartCache import SmartCache
CONFIG=None
QUERIERS=[]
args=sys.argv
configFile=args[1] if len(args)>1 else "config.json"
print("Use config file", configFile)
with open(configFile, "r") as f:
CONFIG=json.load(f)
EmbeddingsManager.init(CONFIG)
Summary.init(CONFIG)
QUERIERS=[
EmbeddingsQuery(CONFIG),
DiscourseQuery(
CONFIG,CONFIG["JME_HUB_URL"],
searchFilter=CONFIG["JME_HUB_SEARCH_FILTER"],
knowledgeCutoff=CONFIG["JME_HUB_KNOWLEDGE_CUTOFF"]
)
]
Translator.init(CONFIG)
def getAffineDocs(question,context,keywords,shortQuestion, wordSalad=None, unitFilter=None,
maxFragmentsToReturn=3, maxFragmentsToSelect=12,merge=False):
affineDocs=[]
for q in QUERIERS:
print("Get affine docs from",q,"using question",question,"with context",context,"and keywords",keywords)
t=time.time()
v=q.getAffineDocs(
question, context, keywords,shortQuestion, wordSalad, unitFilter,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect,
merge=merge
)
print("Completed in",time.time()-t,"seconds.")
if v!=None:
affineDocs.extend(v)
return affineDocs
def rewriteError(error):
    if error.startswith("Rate limit reached "):
        return "Rate limit."
    return error
def rewrite(question):
# replace app, applet, game, application with simple application
question=re.sub(r"\b(app|applet|game|application)\b", "simple application", question, flags=re.IGNORECASE)
return question
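# For illustration: rewrite("How do I make a game?") returns
# "How do I make a simple application?", since the regex above replaces
# app/applet/game/application with "simple application".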
def createChain():
# Backward compatibility
model_name=CONFIG.get("OPENAI_MODEL","text-davinci-003")
llm_name="openai"
########
llmx=CONFIG.get("LLM_MODEL",None) # "openai:text-davinci-003" "cohere:xlarge"
if llmx!=None:
if ":" in llmx:
llm_name,model_name=llmx.split(":")
else:
llm_name,model_name=llmx.split(".")
template = ""
template_path="prompts/"+llm_name+"."+model_name+".txt"
if not os.path.exists(template_path):
template_path="prompts/openai.text-davinci-003.txt"
with open(template_path, "r") as f:
template=f.read()
prompt = PromptTemplate(
input_variables=[ "history", "question", "summaries"],
template=template
)
llm=None
history_length=700
if llm_name=="openai":
max_tokens=512
temperature=0.0
if model_name=="text-davinci-003":
max_tokens=512
elif model_name=="code-davinci-002":
max_tokens=1024
#history_length=1024
llm=OpenAI(
temperature=temperature,
model_name=model_name,
max_tokens=max_tokens,
)
elif llm_name=="cohere":
llm=Cohere(
model=model_name,
max_tokens=700
)
history_length=200
elif llm_name=="ai21":
llm=AI21(
temperature=0.7,
model=model_name,
)
elif llm_name=="nlpcloud":
llm=NLPCloud(
model_name=model_name,
)
else:
raise Exception("Unknown LLM "+llm_name)
print("Use model ",model_name,"from",llm_name)
memory=ConversationSummaryBufferMemory(llm=llm, max_token_limit=history_length,human_prefix="QUESTION",ai_prefix="ANSWER", memory_key="history", input_key="question")
chain = load_qa_with_sources_chain(
llm,
memory=memory,
prompt=prompt,
verbose=True,
)
return chain
def extractQuestionData(question,wordSalad):
shortQuestion=Summary.summarizeMarkdown(question,min_length=100,max_length=1024,withCodeBlocks=False)
context=Summary.summarizeText(wordSalad,min_length=20,max_length=32)
keywords=[]
keywords.extend(Summary.getKeywords(shortQuestion,2))
keywords.extend(Summary.getKeywords(Summary.summarizeText(wordSalad,min_length=10,max_length=20),3))
return [question,shortQuestion,context,keywords,wordSalad]
def queryChain(chain,question):
wordSalad=""
for h in chain.memory.buffer: wordSalad+=h+" "
wordSalad+=" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda :extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda :getAffineDocs(question,context,keywords,shortQuestion,wordSalad))
print("Found ",len(affineDocs), " affine docs")
print("Q: ", shortQuestion)
output=chain({"input_documents": affineDocs, "question": shortQuestion}, return_only_outputs=True)
print("A :",output)
return output
sessions={}
langchain.llm_cache = SmartCache(CONFIG)#SQLiteCache(database_path=CONFIG["CACHE_PATH"]+"/langchain.db")
def clearSessions():
while True:
time.sleep(60*5)
for session in sessions:
if sessions[session]["timeout"] < time.time():
del sessions[session]
threading.Thread(target=clearSessions).start()
def createSessionSecret():
hex_chars = string.hexdigits
timeHash=hashlib.sha256(str(time.time()).encode("utf-8")).hexdigest()[:12]
return ''.join(secrets.choice(hex_chars) for i in range(64))+timeHash
app = Flask(__name__)
@app.route("/langs")
def langs():
return json.dumps(Translator.getLangs())
@app.route("/session",methods = ['POST'])
def session():
body=request.get_json()
lang=body["lang"] if "lang" in body else "en"
if lang=="auto":
lang="en"
if not "sessionSecret" in body or body["sessionSecret"].strip()=="":
sessionSecret=createSessionSecret()
else:
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
sessions[sessionSecret]={
"chain": createChain(),
"timeout": time.time()+60*30
}
else:
sessions[sessionSecret]["timeout"]=time.time()+60*30
welcomeText=""
welcomeText+=Translator.translate("en", lang,"Hi there! I'm an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics.")
welcomeText+="<br><br>"
welcomeText+="<footer><span class=\"material-symbols-outlined\">tips_and_updates</span><span>"+Translator.translate("en", lang,"This chat bot is intended to provide helpful information, but accuracy is not guaranteed.")+"</span></footer>"
return json.dumps( {
"sessionSecret": sessionSecret,
"helloText":Translator.translate("en",lang,"Who are you?"),
"welcomeText":welcomeText
})
@app.route("/query",methods = ['POST'])
def query():
try:
body=request.get_json()
question=rewrite(body["question"])
lang=body["lang"] if "lang" in body else "en"
if lang == "auto":
lang=Translator.detect(question)
if lang!="en":
question=Translator.translate(lang,"en",question)
if len(question)==0:
raise Exception("Question is empty")
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
return json.dumps({"error": "Session expired"})
chain=sessions[sessionSecret]["chain"]
output=queryChain(chain,question)
if lang!="en":
output["output_text"]=Translator.translate("en",lang,output["output_text"])
#print(chain.memory.buffer)
return json.dumps(output)
except Exception as e:
print(e)
print(traceback.format_exc())
errorStr=str(e)
errorStr=rewriteError(errorStr)
return json.dumps({"error": errorStr})
@app.route('/<path:filename>')
def serveFrontend(filename):
return send_from_directory('frontend/', filename)
@app.route('/')
def serveIndex():
return send_from_directory('frontend/', "index.html")
@app.route('/docs', methods=['POST'])
def docs():
body=request.get_json()
question=body["question"]
maxFragmentsToReturn=int(body.get("maxFragmentsToReturn",3))
    maxFragmentsToSelect=int(body.get("maxFragmentsToSelect",6))
wordSalad=body.get("context","")+" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda : extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda : getAffineDocs(
question,context,keywords,shortQuestion,wordSalad,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect
))
plainDocs=[
{
"content":doc.page_content,
"metadata":doc.metadata
} for doc in affineDocs
]
return json.dumps(plainDocs)
serve(app, host="0.0.0.0", port=8080, connection_limit=1000)
| [
"langchain.chains.qa_with_sources.load_qa_with_sources_chain",
"langchain.llms.Cohere",
"langchain.llms.OpenAI",
"langchain.prompts.PromptTemplate",
"langchain.llms.AI21",
"langchain.llms.NLPCloud",
"langchain.chains.conversation.memory.ConversationSummaryBufferMemory"
] | [((5785, 5803), 'SmartCache.SmartCache', 'SmartCache', (['CONFIG'], {}), '(CONFIG)\n', (5795, 5803), False, 'from SmartCache import SmartCache\n'), ((6330, 6345), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (6335, 6345), False, 'from flask import Flask, send_from_directory\n'), ((9830, 9890), 'waitress.serve', 'serve', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8080)', 'connection_limit': '(1000)'}), "(app, host='0.0.0.0', port=8080, connection_limit=1000)\n", (9835, 9890), False, 'from waitress import serve\n'), ((1263, 1275), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1272, 1275), False, 'import json\n'), ((1280, 1310), 'embeddings.EmbeddingsManager.init', 'EmbeddingsManager.init', (['CONFIG'], {}), '(CONFIG)\n', (1302, 1310), False, 'from embeddings import EmbeddingsManager\n'), ((1315, 1335), 'Summary.Summary.init', 'Summary.init', (['CONFIG'], {}), '(CONFIG)\n', (1327, 1335), False, 'from Summary import Summary\n'), ((1591, 1614), 'translator.Translator.init', 'Translator.init', (['CONFIG'], {}), '(CONFIG)\n', (1606, 1614), False, 'from translator import Translator\n'), ((2557, 2659), 're.sub', 're.sub', (['"""\\\\b(app|applet|game|application)\\\\b"""', '"""simple application"""', 'question'], {'flags': 're.IGNORECASE'}), "('\\\\b(app|applet|game|application)\\\\b', 'simple application',\n question, flags=re.IGNORECASE)\n", (2563, 2659), False, 'import re\n'), ((3341, 3432), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'question', 'summaries']", 'template': 'template'}), "(input_variables=['history', 'question', 'summaries'],\n template=template)\n", (3355, 3432), False, 'from langchain.prompts import PromptTemplate\n'), ((4371, 4540), 'langchain.chains.conversation.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'max_token_limit': 'history_length', 'human_prefix': '"""QUESTION"""', 'ai_prefix': '"""ANSWER"""', 'memory_key': '"""history"""', 'input_key': '"""question"""'}), "(llm=llm, max_token_limit=history_length,\n human_prefix='QUESTION', ai_prefix='ANSWER', memory_key='history',\n input_key='question')\n", (4402, 4540), False, 'from langchain.chains.conversation.memory import ConversationSummaryBufferMemory, ConversationBufferMemory, ConversationBufferWindowMemory\n'), ((4543, 4618), 'langchain.chains.qa_with_sources.load_qa_with_sources_chain', 'load_qa_with_sources_chain', (['llm'], {'memory': 'memory', 'prompt': 'prompt', 'verbose': '(True)'}), '(llm, memory=memory, prompt=prompt, verbose=True)\n', (4569, 4618), False, 'from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n'), ((4748, 4842), 'Summary.Summary.summarizeMarkdown', 'Summary.summarizeMarkdown', (['question'], {'min_length': '(100)', 'max_length': '(1024)', 'withCodeBlocks': '(False)'}), '(question, min_length=100, max_length=1024,\n withCodeBlocks=False)\n', (4773, 4842), False, 'from Summary import Summary\n'), ((4849, 4911), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(20)', 'max_length': '(32)'}), '(wordSalad, min_length=20, max_length=32)\n', (4870, 4911), False, 'from Summary import Summary\n'), ((6497, 6515), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6513, 6515), False, 'from flask import request\n'), ((7046, 7280), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. 
I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."""'], {}), '(\'en\', lang,\n "Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."\n )\n', (7066, 7280), False, 'from translator import Translator\n'), ((8890, 8932), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', 'filename'], {}), "('frontend/', filename)\n", (8909, 8932), False, 'from flask import Flask, send_from_directory\n'), ((8979, 9025), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', '"""index.html"""'], {}), "('frontend/', 'index.html')\n", (8998, 9025), False, 'from flask import Flask, send_from_directory\n'), ((9086, 9104), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (9102, 9104), False, 'from flask import request\n'), ((9806, 9827), 'json.dumps', 'json.dumps', (['plainDocs'], {}), '(plainDocs)\n', (9816, 9827), False, 'import json\n'), ((1359, 1382), 'query.embeddingsquery.EmbeddingsQuery', 'EmbeddingsQuery', (['CONFIG'], {}), '(CONFIG)\n', (1374, 1382), False, 'from query.embeddingsquery import EmbeddingsQuery\n'), ((1392, 1545), 'query.discoursequery.DiscourseQuery', 'DiscourseQuery', (['CONFIG', "CONFIG['JME_HUB_URL']"], {'searchFilter': "CONFIG['JME_HUB_SEARCH_FILTER']", 'knowledgeCutoff': "CONFIG['JME_HUB_KNOWLEDGE_CUTOFF']"}), "(CONFIG, CONFIG['JME_HUB_URL'], searchFilter=CONFIG[\n 'JME_HUB_SEARCH_FILTER'], knowledgeCutoff=CONFIG[\n 'JME_HUB_KNOWLEDGE_CUTOFF'])\n", (1406, 1545), False, 'from query.discoursequery import DiscourseQuery\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((3165, 3194), 'os.path.exists', 'os.path.exists', (['template_path'], {}), '(template_path)\n', (3179, 3194), False, 'import os\n'), ((3764, 3841), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, model_name=model_name, max_tokens=max_tokens)\n', (3770, 3841), False, 'from langchain.llms import OpenAI\n'), ((4946, 4983), 'Summary.Summary.getKeywords', 'Summary.getKeywords', (['shortQuestion', '(2)'], {}), '(shortQuestion, 2)\n', (4965, 4983), False, 'from Summary import Summary\n'), ((5914, 5932), 'time.sleep', 'time.sleep', (['(60 * 5)'], {}), '(60 * 5)\n', (5924, 5932), False, 'import time\n'), ((6061, 6099), 'threading.Thread', 'threading.Thread', ([], {'target': 'clearSessions'}), '(target=clearSessions)\n', (6077, 6099), False, 'import threading\n'), ((6407, 6428), 'translator.Translator.getLangs', 'Translator.getLangs', ([], {}), '()\n', (6426, 6428), False, 'from translator import Translator\n'), ((7801, 7819), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (7817, 7819), False, 'from flask import request\n'), ((8605, 8623), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (8615, 8623), False, 'import json\n'), ((3930, 3970), 'langchain.llms.Cohere', 'Cohere', ([], {'model': 'model_name', 'max_tokens': '(700)'}), '(model=model_name, max_tokens=700)\n', (3936, 3970), False, 'from langchain.llms import Cohere\n'), ((5024, 5086), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(10)', 'max_length': '(20)'}), '(wordSalad, min_length=10, max_length=20)\n', (5045, 5086), False, 'from Summary import Summary\n'), ((6992, 7003), 'time.time', 
'time.time', ([], {}), '()\n', (7001, 7003), False, 'import time\n'), ((7398, 7532), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""This chat bot is intended to provide helpful information, but accuracy is not guaranteed."""'], {}), "('en', lang,\n 'This chat bot is intended to provide helpful information, but accuracy is not guaranteed.'\n )\n", (7418, 7532), False, 'from translator import Translator\n'), ((7636, 7684), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Who are you?"""'], {}), "('en', lang, 'Who are you?')\n", (7656, 7684), False, 'from translator import Translator\n'), ((7971, 7998), 'translator.Translator.detect', 'Translator.detect', (['question'], {}), '(question)\n', (7988, 7998), False, 'from translator import Translator\n'), ((8044, 8086), 'translator.Translator.translate', 'Translator.translate', (['lang', '"""en"""', 'question'], {}), "(lang, 'en', question)\n", (8064, 8086), False, 'from translator import Translator\n'), ((8290, 8330), 'json.dumps', 'json.dumps', (["{'error': 'Session expired'}"], {}), "({'error': 'Session expired'})\n", (8300, 8330), False, 'import json\n'), ((8499, 8554), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', "output['output_text']"], {}), "('en', lang, output['output_text'])\n", (8519, 8554), False, 'from translator import Translator\n'), ((8785, 8816), 'json.dumps', 'json.dumps', (["{'error': errorStr}"], {}), "({'error': errorStr})\n", (8795, 8816), False, 'import json\n'), ((2241, 2252), 'time.time', 'time.time', ([], {}), '()\n', (2250, 2252), False, 'import time\n'), ((4072, 4111), 'langchain.llms.AI21', 'AI21', ([], {'temperature': '(0.7)', 'model': 'model_name'}), '(temperature=0.7, model=model_name)\n', (4076, 4111), False, 'from langchain.llms import AI21\n'), ((6010, 6021), 'time.time', 'time.time', ([], {}), '()\n', (6019, 6021), False, 'import time\n'), ((6267, 6292), 'secrets.choice', 'secrets.choice', (['hex_chars'], {}), '(hex_chars)\n', (6281, 6292), False, 'import secrets\n'), ((6911, 6922), 'time.time', 'time.time', ([], {}), '()\n', (6920, 6922), False, 'import time\n'), ((8682, 8704), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8702, 8704), False, 'import traceback\n'), ((4193, 4224), 'langchain.llms.NLPCloud', 'NLPCloud', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (4201, 4224), False, 'from langchain.llms import NLPCloud\n'), ((6201, 6212), 'time.time', 'time.time', ([], {}), '()\n', (6210, 6212), False, 'import time\n')] |
import csv
from ctypes import Array
from typing import Any, Coroutine, List, Tuple
import io
import time
import re
import os
from fastapi import UploadFile
import asyncio
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent
from langchain.tools import HumanInputRun, PythonAstREPLTool
from langchain.callbacks.tracers import ConsoleCallbackHandler
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory
from langchain import PromptTemplate
import pandas as pd
from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
from util.tools import SessionHumanInputRun
import util.config as config
from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue
import redis
r = redis.from_url(os.environ.get("REDIS_URL"))
#r = redis.from_url('redis://:password@localhost:6379')
class Processor:
def __init__(self, session):
self.session = session
async def extract_csv_description(self, df: UploadFile|str, llm, memory) -> Coroutine[Any, Any, Tuple[pd.DataFrame, str]] :
df = pd.read_csv(df)
agent = create_pandas_dataframe_agent(llm=llm,df=df, agent_executor_kwargs={'handle_parsing_errors':True, 'memory':memory},
early_stopping_method="generate", verbose=True,
temperature=0,agent_type=AgentType.OPENAI_FUNCTIONS,)
        descriptions = agent.run("""Describe each column of the table in detail, in the following format:
<name of column 1>: <description of column 1>\n
<name of column 2>: <description of column 2>""", callbacks=[ConsoleCallbackHandler()])
return df, descriptions
async def _human_prompt(prompt, session):
r.publish(f'human_prompt_{session}', prompt)
async def _human_input(session):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(f'human_input_{session}')
message = None
while True:
message = p.get_message()
if message and message['type']=='message':
break
print("waiting for human input")
await asyncio.sleep(1)
return message['data'].decode('utf-8')
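    # Descriptive note on the human-in-the-loop handshake used above: _human_prompt
    # publishes the agent's question on the Redis channel f"human_prompt_{session}",
    # and _human_input polls f"human_input_{session}" until the frontend publishes the
    # user's reply there; progress messages are streamed on f"{session}_response".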
async def process_files(self, table_file, template_file, file_guid):
table_string = table_file.decode('utf-8')
template_string = template_file.decode('utf-8')
llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model="gpt-3.5-turbo-0613", )
memory = ConversationSummaryBufferMemory(llm=llm,memory_key="chat_history", return_messages=True, max_token_limit=1500)
table_df, table_descriptions = await self.extract_csv_description(io.StringIO(table_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'table_descriptions')
r.publish(f'{self.session}_response', table_descriptions)
template_df, template_descriptions = await self.extract_csv_description(io.StringIO(template_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'template_descriptions')
r.publish(f'{self.session}_response', template_descriptions)
dfs =[table_df, template_df]
human_tool = SessionHumanInputRun(session=self.session)
human_tool.description = '''
Use this tool to take human input.
If the mapping is ambiguous, ask 'human' a question with options in the following format.
Make the human confirm the mapping by selecting the appropriate number.
- Question: The template column <template column name> should be mapped to which one of the table columns
(1: <table column name 1>, 2: <table column name 2> (Recommended), 3:<table column name 3>, ...)? Select the appropriate number or specify the column name.
'''
human_tool.prompt_func= Processor._human_prompt
human_tool.input_func = Processor._human_input
mappings = await self.get_mappings(llm, table_descriptions, template_descriptions, human_tool)
codes = await self.get_template_formatting_code(llm, table_df, template_df, human_tool, mappings, memory)
new_table_df = table_df.loc[:,[code.table_column for code in codes]]
for code in codes:
            new_table_df[code.table_column] = new_table_df[code.table_column].apply(lambda x: self.format_value(x, code=code.code))
r.set(f"{self.session}_{file_guid}", new_table_df.to_msgpack(compress='zlib'))
r.publish(f'{self.session}_response', f'file_guid:{file_guid}')
def format_value(self, source_value, code):
value = TransformValue(source=source_value,destination=source_value)
try:
exec(code, {'value':value})
except Exception as e:
r.publish(f'{self.session}_response',f'ERROR: \nCode: \n {code} \n Failed with error: \n{e}')
print(e)
return value.destination
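    # Illustrative example (an assumption, mirroring the prompt further below): for a
    # date column converted from dd.mm.yyyy to mm.dd.yyyy, the agent-generated snippet
    # executed by format_value() might look like:
    #
    #   def format_value(source_value):
    #       day, month, year = source_value.split(".")
    #       return f"{month}.{day}.{year}"
    #   value.destination = format_value(value.source)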
async def get_mappings(self,llm, table_descriptions, template_descriptions, human_tool):
parser = PydanticOutputParser(pydantic_object=TemplateMappingList)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
agent = initialize_agent(
[human_tool],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
)
descriptions = await agent.arun("""Map all the columns of the Template descriptions to columns of the table Descriptions:
- Table Descriptions:
""" + table_descriptions + """
- Template Descriptions:
""" + template_descriptions + """
Use the table and template descriptions above to determine the mapping based on similarity, formats and distribution.
If the table column names are ambiguous take human input.
""",callbacks=[ConsoleCallbackHandler()],)
print(descriptions)
mappings = new_parser.parse(descriptions)
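        # Illustrative shape of the parsed result (inferred from how it is used in
        # get_template_formatting_code below; the actual pydantic models live in
        # util.model):
        #   TemplateMappingList(template_mappings=[
        #       TemplateMapping(template_column="...", table_column="..."), ...])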
return mappings
async def get_template_formatting_code(self, llm, table_df, template_df, human_tool, mappings: TemplateMappingList, memory):
dfs = []
dfs.append(table_df)
dfs.append(template_df)
df_locals = {}
df_locals[f"table_df"] = table_df
df_locals[f"template_df"] = template_df
parser = PydanticOutputParser(pydantic_object=TemplateMappingCode)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
codes=[]
#The code should be in the format of a Python function taking as input a string and returning a string.
for mapping in mappings.template_mappings:
human_tool.description = f'''
Use this tool to get human approval. Always show the samples and code. The human can edit the code and approve it.
'''
table_df_samples = table_df[mapping.table_column].sample(5).to_list()
template_df_samples = template_df[mapping.template_column].sample(5).to_list()
agent = initialize_agent(
[PythonAstREPLTool(locals=df_locals)],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
memory = memory,
memory_key = 'chat_history'
)
#The AI can determine the format of the column values only after sampling.
#As shown in the output below, generate the code as a Python function taking as input a string and returning a string and also include a call to the generated function.
code = agent.run(f'''Provide the code to bring the format of values in table_df column '{mapping.table_column}'
to the format of values in template_df column '{mapping.template_column}' based off the values, data types and formats.
Additional samples to be used to generate the code:
'{mapping.table_column}' sample values: [{table_df_samples}]
'{mapping.template_column}' samples values: [{template_df_samples}]
The input to the code will be a value object with the following attributes:
- source: The value of the table_df column '{mapping.table_column}'.
- destination: The value of the template_df column '{mapping.template_column}'.
Show the sample values using which the code is generated.
For example, for date columns, they may be in different formats, and it is necessary to change the format from dd.mm.yyyy to mm.dd.yyyy.
Final Answer:
```
```python
def format_value(source_value):
<code to transform source_value into destination_value>
return destination_value
value.destination = format_value(value.source)
```
```
Final Answer should contain the samples and code.
''', callbacks=[ConsoleCallbackHandler(), ])
print(code)
human_code = await human_tool.arun(code + '\nSpecify the code with ```python``` tags.')
regex = r"```python((.|\n|\t)*?)```"
code = human_code if re.match(regex, human_code) else code
matches = re.findall(regex, code)
code = ''
for match in matches:
code = code + '\n'+ '\n'.join(match)
codes.append(TemplateMappingCode(template_column=mapping.template_column,
table_column=mapping.table_column,
code=code))
return codes | [
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.tools.PythonAstREPLTool",
"langchain.memory.ConversationSummaryBufferMemory",
"langchain.callbacks.tracers.ConsoleCallbackHandler",
"langchain.agents.create_pandas_dataframe_agent",
"langchain.output_parsers.OutputFixingParser.from_llm",
"langchain.output_parsers.PydanticOutputParser"
] | [((963, 990), 'os.environ.get', 'os.environ.get', (['"""REDIS_URL"""'], {}), "('REDIS_URL')\n", (977, 990), False, 'import os\n'), ((1270, 1285), 'pandas.read_csv', 'pd.read_csv', (['df'], {}), '(df)\n', (1281, 1285), True, 'import pandas as pd\n'), ((1302, 1537), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', ([], {'llm': 'llm', 'df': 'df', 'agent_executor_kwargs': "{'handle_parsing_errors': True, 'memory': memory}", 'early_stopping_method': '"""generate"""', 'verbose': '(True)', 'temperature': '(0)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS'}), "(llm=llm, df=df, agent_executor_kwargs={\n 'handle_parsing_errors': True, 'memory': memory}, early_stopping_method\n ='generate', verbose=True, temperature=0, agent_type=AgentType.\n OPENAI_FUNCTIONS)\n", (1331, 1537), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((2683, 2779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'config.OPENAI_API_KEY', 'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""'}), "(openai_api_key=config.OPENAI_API_KEY, temperature=0, model=\n 'gpt-3.5-turbo-0613')\n", (2693, 2779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2794, 2909), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(1500)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True, max_token_limit=1500)\n", (2825, 2909), False, 'from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory\n'), ((3498, 3540), 'util.tools.SessionHumanInputRun', 'SessionHumanInputRun', ([], {'session': 'self.session'}), '(session=self.session)\n', (3518, 3540), False, 'from util.tools import SessionHumanInputRun\n'), ((4868, 4929), 'util.model.TransformValue', 'TransformValue', ([], {'source': 'source_value', 'destination': 'source_value'}), '(source=source_value, destination=source_value)\n', (4882, 4929), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((5288, 5345), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingList'}), '(pydantic_object=TemplateMappingList)\n', (5308, 5345), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5367, 5418), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (5394, 5418), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5434, 5614), 'langchain.agents.initialize_agent', 'initialize_agent', (['[human_tool]', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)', 'early_stopping_method': '"""force"""', 'temperature': '(0.3)', 'output_parser': 'new_parser'}), "([human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS,\n handle_parsing_errors=True, early_stopping_method='force', temperature=\n 0.3, output_parser=new_parser)\n", (5450, 5614), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((6899, 6956), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingCode'}), '(pydantic_object=TemplateMappingCode)\n', (6919, 6956), 
False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((6978, 7029), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (7005, 7029), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((10532, 10555), 're.findall', 're.findall', (['regex', 'code'], {}), '(regex, code)\n', (10542, 10555), False, 'import re\n'), ((2425, 2441), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2438, 2441), False, 'import asyncio\n'), ((2988, 3013), 'io.StringIO', 'io.StringIO', (['table_string'], {}), '(table_string)\n', (2999, 3013), False, 'import io\n'), ((3250, 3278), 'io.StringIO', 'io.StringIO', (['template_string'], {}), '(template_string)\n', (3261, 3278), False, 'import io\n'), ((10459, 10486), 're.match', 're.match', (['regex', 'human_code'], {}), '(regex, human_code)\n', (10467, 10486), False, 'import re\n'), ((10690, 10801), 'util.model.TemplateMappingCode', 'TemplateMappingCode', ([], {'template_column': 'mapping.template_column', 'table_column': 'mapping.table_column', 'code': 'code'}), '(template_column=mapping.template_column, table_column=\n mapping.table_column, code=code)\n', (10709, 10801), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((1905, 1929), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (1927, 1929), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((7662, 7697), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'locals': 'df_locals'}), '(locals=df_locals)\n', (7679, 7697), False, 'from langchain.tools import HumanInputRun, PythonAstREPLTool\n'), ((6408, 6432), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (6430, 6432), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((10224, 10248), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (10246, 10248), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n')] |
import csv
from ctypes import Array
from typing import Any, Coroutine, List, Tuple
import io
import time
import re
import os
from fastapi import UploadFile
import asyncio
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent
from langchain.tools import HumanInputRun, PythonAstREPLTool
from langchain.callbacks.tracers import ConsoleCallbackHandler
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory
from langchain import PromptTemplate
import pandas as pd
from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
from util.tools import SessionHumanInputRun
import util.config as config
from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue
import redis
r = redis.from_url(os.environ.get("REDIS_URL"))
#r = redis.from_url('redis://:password@localhost:6379')
class Processor:
def __init__(self, session):
self.session = session
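    # Processor drives the end-to-end flow: profile both CSVs with a pandas-dataframe
    # agent, have the LLM propose a column mapping (confirmed by a human over Redis
    # pub/sub), generate per-column formatting code, apply it, and publish the
    # reformatted table back to Redis.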
async def extract_csv_description(self, df: UploadFile|str, llm, memory) -> Coroutine[Any, Any, Tuple[pd.DataFrame, str]] :
df = pd.read_csv(df)
agent = create_pandas_dataframe_agent(llm=llm,df=df, agent_executor_kwargs={'handle_parsing_errors':True, 'memory':memory},
early_stopping_method="generate", verbose=True,
temperature=0,agent_type=AgentType.OPENAI_FUNCTIONS,)
descriptions = agent.run("""Describe what is the column name of each of the column table in detail in the following format:
<name of column 1>: <description of column 1>\n
<name of column 2>: <description of column 2>""", callbacks=[ConsoleCallbackHandler()])
return df, descriptions
async def _human_prompt(prompt, session):
r.publish(f'human_prompt_{session}', prompt)
async def _human_input(session):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(f'human_input_{session}')
message = None
while True:
message = p.get_message()
if message and message['type']=='message':
break
print("waiting for human input")
await asyncio.sleep(1)
return message['data'].decode('utf-8')
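    # The two helpers above implement the human-in-the-loop round trip over Redis
    # pub/sub: the question is published on 'human_prompt_<session>' and the answer is
    # polled from 'human_input_<session>'. A minimal counterpart client (illustrative
    # only, not part of this module) could look like:
    #
    #   sub = r.pubsub(ignore_subscribe_messages=True)
    #   sub.subscribe(f'human_prompt_{session}')
    #   for msg in sub.listen():
    #       if msg['type'] == 'message':
    #           r.publish(f'human_input_{session}', input(msg['data'].decode('utf-8')))
    #           break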
async def process_files(self, table_file, template_file, file_guid):
table_string = table_file.decode('utf-8')
template_string = template_file.decode('utf-8')
llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model="gpt-3.5-turbo-0613", )
memory = ConversationSummaryBufferMemory(llm=llm,memory_key="chat_history", return_messages=True, max_token_limit=1500)
table_df, table_descriptions = await self.extract_csv_description(io.StringIO(table_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'table_descriptions')
r.publish(f'{self.session}_response', table_descriptions)
template_df, template_descriptions = await self.extract_csv_description(io.StringIO(template_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'template_descriptions')
r.publish(f'{self.session}_response', template_descriptions)
dfs =[table_df, template_df]
human_tool = SessionHumanInputRun(session=self.session)
human_tool.description = '''
Use this tool to take human input.
If the mapping is ambiguous, ask 'human' a question with options in the following format.
Make the human confirm the mapping by selecting the appropriate number.
- Question: The template column <template column name> should be mapped to which one of the table columns
(1: <table column name 1>, 2: <table column name 2> (Recommended), 3:<table column name 3>, ...)? Select the appropriate number or specify the column name.
'''
human_tool.prompt_func= Processor._human_prompt
human_tool.input_func = Processor._human_input
mappings = await self.get_mappings(llm, table_descriptions, template_descriptions, human_tool)
codes = await self.get_template_formatting_code(llm, table_df, template_df, human_tool, mappings, memory)
new_table_df = table_df.loc[:,[code.table_column for code in codes]]
for code in codes:
            new_table_df[code.table_column] = new_table_df[code.table_column].apply(lambda x: self.format_value(x, code=code.code))
r.set(f"{self.session}_{file_guid}", new_table_df.to_msgpack(compress='zlib'))
r.publish(f'{self.session}_response', f'file_guid:{file_guid}')
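    # The reformatted table is stored under the Redis key '<session>_<file_guid>' as a
    # zlib-compressed msgpack blob, and the key is announced on the '<session>_response'
    # channel so the caller knows when the result is ready.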
def format_value(self, source_value, code):
value = TransformValue(source=source_value,destination=source_value)
try:
exec(code, {'value':value})
except Exception as e:
r.publish(f'{self.session}_response',f'ERROR: \nCode: \n {code} \n Failed with error: \n{e}')
print(e)
return value.destination
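    # `code` is expected to follow the contract prompted for in
    # get_template_formatting_code(): define a format_value() helper and write the
    # converted value back to value.destination. Illustrative example only (the real
    # snippet is produced by the LLM at runtime):
    #
    #   def format_value(source_value):
    #       d, m, y = source_value.split('.')   # e.g. dd.mm.yyyy -> mm.dd.yyyy
    #       return f"{m}.{d}.{y}"
    #   value.destination = format_value(value.source)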
async def get_mappings(self,llm, table_descriptions, template_descriptions, human_tool):
parser = PydanticOutputParser(pydantic_object=TemplateMappingList)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
agent = initialize_agent(
[human_tool],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
)
descriptions = await agent.arun("""Map all the columns of the Template descriptions to columns of the table Descriptions:
- Table Descriptions:
""" + table_descriptions + """
- Template Descriptions:
""" + template_descriptions + """
Use the table and template descriptions above to determine the mapping based on similarity, formats and distribution.
If the table column names are ambiguous take human input.
""",callbacks=[ConsoleCallbackHandler()],)
print(descriptions)
mappings = new_parser.parse(descriptions)
return mappings
async def get_template_formatting_code(self, llm, table_df, template_df, human_tool, mappings: TemplateMappingList, memory):
dfs = []
dfs.append(table_df)
dfs.append(template_df)
df_locals = {}
df_locals[f"table_df"] = table_df
df_locals[f"template_df"] = template_df
parser = PydanticOutputParser(pydantic_object=TemplateMappingCode)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
codes=[]
#The code should be in the format of a Python function taking as input a string and returning a string.
for mapping in mappings.template_mappings:
human_tool.description = f'''
Use this tool to get human approval. Always show the samples and code. The human can edit the code and approve it.
'''
table_df_samples = table_df[mapping.table_column].sample(5).to_list()
template_df_samples = template_df[mapping.template_column].sample(5).to_list()
agent = initialize_agent(
[PythonAstREPLTool(locals=df_locals)],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
memory = memory,
memory_key = 'chat_history'
)
#The AI can determine the format of the column values only after sampling.
#As shown in the output below, generate the code as a Python function taking as input a string and returning a string and also include a call to the generated function.
code = agent.run(f'''Provide the code to bring the format of values in table_df column '{mapping.table_column}'
to the format of values in template_df column '{mapping.template_column}' based off the values, data types and formats.
Additional samples to be used to generate the code:
'{mapping.table_column}' sample values: [{table_df_samples}]
                            '{mapping.template_column}' sample values: [{template_df_samples}]
The input to the code will be a value object with the following attributes:
- source: The value of the table_df column '{mapping.table_column}'.
- destination: The value of the template_df column '{mapping.template_column}'.
Show the sample values using which the code is generated.
For example, for date columns, they may be in different formats, and it is necessary to change the format from dd.mm.yyyy to mm.dd.yyyy.
Final Answer:
```
```python
def format_value(source_value):
<code to transform source_value into destination_value>
return destination_value
value.destination = format_value(value.source)
```
```
Final Answer should contain the samples and code.
''', callbacks=[ConsoleCallbackHandler(), ])
print(code)
human_code = await human_tool.arun(code + '\nSpecify the code with ```python``` tags.')
regex = r"```python((.|\n|\t)*?)```"
code = human_code if re.match(regex, human_code) else code
matches = re.findall(regex, code)
code = ''
for match in matches:
code = code + '\n'+ '\n'.join(match)
codes.append(TemplateMappingCode(template_column=mapping.template_column,
table_column=mapping.table_column,
code=code))
return codes | [
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.tools.PythonAstREPLTool",
"langchain.memory.ConversationSummaryBufferMemory",
"langchain.callbacks.tracers.ConsoleCallbackHandler",
"langchain.agents.create_pandas_dataframe_agent",
"langchain.output_parsers.OutputFixingParser.from_llm",
"langchain.output_parsers.PydanticOutputParser"
] | [((963, 990), 'os.environ.get', 'os.environ.get', (['"""REDIS_URL"""'], {}), "('REDIS_URL')\n", (977, 990), False, 'import os\n'), ((1270, 1285), 'pandas.read_csv', 'pd.read_csv', (['df'], {}), '(df)\n', (1281, 1285), True, 'import pandas as pd\n'), ((1302, 1537), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', ([], {'llm': 'llm', 'df': 'df', 'agent_executor_kwargs': "{'handle_parsing_errors': True, 'memory': memory}", 'early_stopping_method': '"""generate"""', 'verbose': '(True)', 'temperature': '(0)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS'}), "(llm=llm, df=df, agent_executor_kwargs={\n 'handle_parsing_errors': True, 'memory': memory}, early_stopping_method\n ='generate', verbose=True, temperature=0, agent_type=AgentType.\n OPENAI_FUNCTIONS)\n", (1331, 1537), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((2683, 2779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'config.OPENAI_API_KEY', 'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""'}), "(openai_api_key=config.OPENAI_API_KEY, temperature=0, model=\n 'gpt-3.5-turbo-0613')\n", (2693, 2779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2794, 2909), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(1500)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True, max_token_limit=1500)\n", (2825, 2909), False, 'from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory\n'), ((3498, 3540), 'util.tools.SessionHumanInputRun', 'SessionHumanInputRun', ([], {'session': 'self.session'}), '(session=self.session)\n', (3518, 3540), False, 'from util.tools import SessionHumanInputRun\n'), ((4868, 4929), 'util.model.TransformValue', 'TransformValue', ([], {'source': 'source_value', 'destination': 'source_value'}), '(source=source_value, destination=source_value)\n', (4882, 4929), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((5288, 5345), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingList'}), '(pydantic_object=TemplateMappingList)\n', (5308, 5345), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5367, 5418), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (5394, 5418), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5434, 5614), 'langchain.agents.initialize_agent', 'initialize_agent', (['[human_tool]', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)', 'early_stopping_method': '"""force"""', 'temperature': '(0.3)', 'output_parser': 'new_parser'}), "([human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS,\n handle_parsing_errors=True, early_stopping_method='force', temperature=\n 0.3, output_parser=new_parser)\n", (5450, 5614), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((6899, 6956), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingCode'}), '(pydantic_object=TemplateMappingCode)\n', (6919, 6956), 
False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((6978, 7029), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (7005, 7029), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((10532, 10555), 're.findall', 're.findall', (['regex', 'code'], {}), '(regex, code)\n', (10542, 10555), False, 'import re\n'), ((2425, 2441), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2438, 2441), False, 'import asyncio\n'), ((2988, 3013), 'io.StringIO', 'io.StringIO', (['table_string'], {}), '(table_string)\n', (2999, 3013), False, 'import io\n'), ((3250, 3278), 'io.StringIO', 'io.StringIO', (['template_string'], {}), '(template_string)\n', (3261, 3278), False, 'import io\n'), ((10459, 10486), 're.match', 're.match', (['regex', 'human_code'], {}), '(regex, human_code)\n', (10467, 10486), False, 'import re\n'), ((10690, 10801), 'util.model.TemplateMappingCode', 'TemplateMappingCode', ([], {'template_column': 'mapping.template_column', 'table_column': 'mapping.table_column', 'code': 'code'}), '(template_column=mapping.template_column, table_column=\n mapping.table_column, code=code)\n', (10709, 10801), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((1905, 1929), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (1927, 1929), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((7662, 7697), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'locals': 'df_locals'}), '(locals=df_locals)\n', (7679, 7697), False, 'from langchain.tools import HumanInputRun, PythonAstREPLTool\n'), ((6408, 6432), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (6430, 6432), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((10224, 10248), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (10246, 10248), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n')] |
from typing import Dict, List, Optional
from langchain.agents.load_tools import (
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langflow.custom import customs
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.tools.constants import (
ALL_TOOLS_NAMES,
CUSTOM_TOOLS,
FILE_TOOLS,
OTHER_TOOLS,
)
from langflow.interface.tools.util import get_tool_params
from langflow.settings import settings
from langflow.template.field.base import TemplateField
from langflow.template.template.base import Template
from langflow.utils import util
from langflow.utils.util import build_template_from_class
TOOL_INPUTS = {
"str": TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
placeholder="",
value="",
),
"llm": TemplateField(
field_type="BaseLanguageModel", required=True, is_list=False, show=True
),
"func": TemplateField(
field_type="function",
required=True,
is_list=False,
show=True,
multiline=True,
),
"code": TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
value="",
multiline=True,
),
"path": TemplateField(
field_type="file",
required=True,
is_list=False,
show=True,
value="",
suffixes=[".json", ".yaml", ".yml"],
fileTypes=["json", "yaml", "yml"],
),
}
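# TOOL_INPUTS maps a tool parameter name to the TemplateField used to render it;
# ToolCreator.get_signature() below falls back to the "str" field for any parameter
# without a dedicated entry.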
class ToolCreator(LangChainTypeCreator):
type_name: str = "tools"
tools_dict: Optional[Dict] = None
@property
def type_to_loader_dict(self) -> Dict:
if self.tools_dict is None:
all_tools = {}
for tool, tool_fcn in ALL_TOOLS_NAMES.items():
tool_params = get_tool_params(tool_fcn)
tool_name = tool_params.get("name") or tool
if tool_name in settings.tools or settings.dev:
if tool_name == "JsonSpec":
tool_params["path"] = tool_params.pop("dict_") # type: ignore
all_tools[tool_name] = {
"type": tool,
"params": tool_params,
"fcn": tool_fcn,
}
self.tools_dict = all_tools
return self.tools_dict
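    # The mapping is built lazily on first access and cached on the instance, so
    # get_signature() and to_list() below reuse the same dict.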
def get_signature(self, name: str) -> Optional[Dict]:
"""Get the signature of a tool."""
base_classes = ["Tool", "BaseTool"]
fields = []
params = []
tool_params = {}
# Raise error if name is not in tools
if name not in self.type_to_loader_dict.keys():
raise ValueError("Tool not found")
tool_type: str = self.type_to_loader_dict[name]["type"] # type: ignore
# if tool_type in _BASE_TOOLS.keys():
# params = []
if tool_type in _LLM_TOOLS.keys():
params = ["llm"]
elif tool_type in _EXTRA_LLM_TOOLS.keys():
extra_keys = _EXTRA_LLM_TOOLS[tool_type][1]
params = ["llm"] + extra_keys
elif tool_type in _EXTRA_OPTIONAL_TOOLS.keys():
extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type][1]
params = extra_keys
# elif tool_type == "Tool":
# params = ["name", "description", "func"]
elif tool_type in CUSTOM_TOOLS:
# Get custom tool params
params = self.type_to_loader_dict[name]["params"] # type: ignore
base_classes = ["function"]
if node := customs.get_custom_nodes("tools").get(tool_type):
return node
elif tool_type in FILE_TOOLS:
params = self.type_to_loader_dict[name]["params"] # type: ignore
base_classes += [name]
elif tool_type in OTHER_TOOLS:
tool_dict = build_template_from_class(tool_type, OTHER_TOOLS)
fields = tool_dict["template"]
# Pop unnecessary fields and add name
fields.pop("_type") # type: ignore
fields.pop("return_direct") # type: ignore
fields.pop("verbose") # type: ignore
tool_params = {
"name": fields.pop("name")["value"], # type: ignore
"description": fields.pop("description")["value"], # type: ignore
}
fields = [
TemplateField(name=name, field_type=field["type"], **field)
for name, field in fields.items() # type: ignore
]
base_classes += tool_dict["base_classes"]
# Copy the field and add the name
for param in params:
field = TOOL_INPUTS.get(param, TOOL_INPUTS["str"]).copy()
field.name = param
field.advanced = False
if param == "aiosession":
field.show = False
field.required = False
fields.append(field)
template = Template(fields=fields, type_name=tool_type)
tool_params = {**tool_params, **self.type_to_loader_dict[name]["params"]}
return {
"template": util.format_dict(template.to_dict()),
**tool_params,
"base_classes": base_classes,
}
def to_list(self) -> List[str]:
"""List all load tools"""
return list(self.type_to_loader_dict.keys())
tool_creator = ToolCreator()
| [
"langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys",
"langchain.agents.load_tools._LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys"
] | [((690, 792), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'placeholder': '""""""', 'value': '""""""'}), "(field_type='str', required=True, is_list=False, show=True,\n placeholder='', value='')\n", (703, 792), False, 'from langflow.template.field.base import TemplateField\n'), ((856, 946), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""BaseLanguageModel"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)'}), "(field_type='BaseLanguageModel', required=True, is_list=False,\n show=True)\n", (869, 946), False, 'from langflow.template.field.base import TemplateField\n'), ((970, 1068), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""function"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'multiline': '(True)'}), "(field_type='function', required=True, is_list=False, show=\n True, multiline=True)\n", (983, 1068), False, 'from langflow.template.field.base import TemplateField\n'), ((1124, 1226), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'multiline': '(True)'}), "(field_type='str', required=True, is_list=False, show=True,\n value='', multiline=True)\n", (1137, 1226), False, 'from langflow.template.field.base import TemplateField\n'), ((1291, 1454), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""file"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'suffixes': "['.json', '.yaml', '.yml']", 'fileTypes': "['json', 'yaml', 'yml']"}), "(field_type='file', required=True, is_list=False, show=True,\n value='', suffixes=['.json', '.yaml', '.yml'], fileTypes=['json',\n 'yaml', 'yml'])\n", (1304, 1454), False, 'from langflow.template.field.base import TemplateField\n'), ((4975, 5019), 'langflow.template.template.base.Template', 'Template', ([], {'fields': 'fields', 'type_name': 'tool_type'}), '(fields=fields, type_name=tool_type)\n', (4983, 5019), False, 'from langflow.template.template.base import Template\n'), ((1779, 1802), 'langflow.interface.tools.constants.ALL_TOOLS_NAMES.items', 'ALL_TOOLS_NAMES.items', ([], {}), '()\n', (1800, 1802), False, 'from langflow.interface.tools.constants import ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS\n'), ((2927, 2944), 'langchain.agents.load_tools._LLM_TOOLS.keys', '_LLM_TOOLS.keys', ([], {}), '()\n', (2942, 2944), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((1834, 1859), 'langflow.interface.tools.util.get_tool_params', 'get_tool_params', (['tool_fcn'], {}), '(tool_fcn)\n', (1849, 1859), False, 'from langflow.interface.tools.util import get_tool_params\n'), ((3001, 3024), 'langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys', '_EXTRA_LLM_TOOLS.keys', ([], {}), '()\n', (3022, 3024), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3150, 3178), 'langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys', '_EXTRA_OPTIONAL_TOOLS.keys', ([], {}), '()\n', (3176, 3178), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3874, 3923), 'langflow.utils.util.build_template_from_class', 'build_template_from_class', (['tool_type', 'OTHER_TOOLS'], {}), '(tool_type, 
OTHER_TOOLS)\n', (3899, 3923), False, 'from langflow.utils.util import build_template_from_class\n'), ((3582, 3615), 'langflow.custom.customs.get_custom_nodes', 'customs.get_custom_nodes', (['"""tools"""'], {}), "('tools')\n", (3606, 3615), False, 'from langflow.custom import customs\n'), ((4407, 4466), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'name': 'name', 'field_type': "field['type']"}), "(name=name, field_type=field['type'], **field)\n", (4420, 4466), False, 'from langflow.template.field.base import TemplateField\n')] |
# imports
import os, shutil, json, re
import pathlib
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader
from langchain.document_loaders import UnstructuredURLLoader
from langchain.docstore.document import Document
from google.cloud import storage
import base64
import langchain.text_splitter as text_splitter
from dotenv import load_dotenv
import tempfile
import hashlib
from langchain.schema import Document
import logging
from my_llm.pubsub_manager import PubSubManager
import datetime
from .database import setup_database
from .database import delete_row_from_source
from .database import return_sources_last24
load_dotenv()
def contains_url(message_data):
url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
if url_pattern.search(message_data):
return True
else:
return False
def extract_urls(text):
url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
urls = url_pattern.findall(text)
return urls
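# Illustrative example: extract_urls("see https://example.com for details") returns
# ["https://example.com"]; whitespace terminates a match.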
# utility functions
def convert_to_txt(file_path):
file_dir, file_name = os.path.split(file_path)
file_base, file_ext = os.path.splitext(file_name)
txt_file = os.path.join(file_dir, f"{file_base}.txt")
shutil.copyfile(file_path, txt_file)
return txt_file
def compute_sha1_from_file(file_path):
with open(file_path, "rb") as file:
bytes = file.read()
readable_hash = hashlib.sha1(bytes).hexdigest()
return readable_hash
def compute_sha1_from_content(content):
readable_hash = hashlib.sha1(content).hexdigest()
return readable_hash
def add_file_to_gcs(filename: str, vector_name:str, bucket_name: str=None, metadata:dict=None):
storage_client = storage.Client()
bucket_name = bucket_name if bucket_name is not None else os.getenv('GCS_BUCKET', None)
if bucket_name is None:
raise ValueError("No bucket found to upload to: GCS_BUCKET returned None")
if bucket_name.startswith("gs://"):
bucket_name = bucket_name.removeprefix("gs://")
logging.info(f"Bucket_name: {bucket_name}")
bucket = storage_client.get_bucket(bucket_name)
now = datetime.datetime.now()
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
hour = now.strftime("%H")
bucket_filepath = f"{vector_name}/{year}/{month}/{day}/{hour}/{os.path.basename(filename)}"
blob = bucket.blob(bucket_filepath)
the_metadata = {
"vector_name": vector_name,
}
if metadata is not None:
the_metadata.update(metadata)
blob.metadata = the_metadata
#TODO: create cloud storage pubsub subscription?
blob.upload_from_filename(filename)
logging.info(f"File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}")
# create pubsub topic and subscription if necessary to receive notifications from cloud storage
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}")
sub_name = f"pubsub_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_to_store/{vector_name}")
setup_database(vector_name)
return f"gs://{bucket_name}/{bucket_filepath}"
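# Uploaded objects are keyed as "<vector_name>/<YYYY>/<MM>/<DD>/<HH>/<basename>", so a
# bucket listing is naturally partitioned by ingestion time.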
def read_url_to_document(url: str, metadata: dict = None):
loader = UnstructuredURLLoader(urls=[url])
docs = loader.load()
if metadata is not None:
for doc in docs:
doc.metadata.update(metadata)
logging.info(f"UnstructuredURLLoader docs: {docs}")
return docs
def read_file_to_document(gs_file: pathlib.Path, split=False, metadata: dict = None):
#file_sha1 = compute_sha1_from_file(gs_file.name)
try:
#TODO: Use UnstructuredAPIFileLoader instead?
logging.info(f"Sending {gs_file} to UnstructuredAPIFileLoader")
loader = UnstructuredAPIFileLoader(gs_file, mode="elements", api_key="FAKE_API_KEY")
if split:
# only supported for some file types
docs = loader.load_and_split()
else:
docs = loader.load()
logging.info(f"Loaded docs for {gs_file} from UnstructuredAPIFileLoader")
except ValueError as e:
logging.info(f"Error for {gs_file} from UnstructuredAPIFileLoader: {str(e)}")
if "file type is not supported in partition" in str(e):
logging.info("trying locally via .txt conversion")
txt_file = None
try:
# Convert the file to .txt and try again
txt_file = convert_to_txt(gs_file)
loader = UnstructuredFileLoader(txt_file, mode="elements")
if split:
docs = loader.load_and_split()
else:
docs = loader.load()
except Exception as inner_e:
raise Exception("An error occurred during txt conversion or loading.") from inner_e
finally:
# Ensure cleanup happens if txt_file was created
if txt_file is not None and os.path.exists(txt_file):
os.remove(txt_file)
else:
raise
except Exception as e:
logging.error(f"An unexpected error occurred for {gs_file}: {str(e)}")
raise
for doc in docs:
#doc.metadata["file_sha1"] = file_sha1
logging.info(f"doc_content: {doc.page_content[:30]}")
if metadata is not None:
doc.metadata.update(metadata)
logging.info(f"gs_file: {gs_file} turned into {len(docs)} documents")
return docs
def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0):
if extension == ".py":
return text_splitter.PythonCodeTextSplitter()
elif extension == ".md":
return text_splitter.MarkdownTextSplitter()
return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
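# Illustrative example: choose_splitter(".py") returns a PythonCodeTextSplitter,
# choose_splitter(".md") a MarkdownTextSplitter, and any other extension falls back to
# a RecursiveCharacterTextSplitter with 1024-character chunks and no overlap.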
def remove_whitespace(page_content: str):
return page_content.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace(" ", " ")
def chunk_doc_to_docs(documents: list, extension: str = ".md"):
"""Turns a Document object into a list of many Document chunks"""
source_chunks = []
for document in documents:
splitter = choose_splitter(extension)
for chunk in splitter.split_text(remove_whitespace(document.page_content)):
source_chunks.append(Document(page_content=chunk, metadata=document.metadata))
return source_chunks
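# Illustrative example: chunk_doc_to_docs([Document(page_content=long_text)]) yields
# smaller Document chunks that all carry the original document's metadata.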
def data_to_embed_pubsub(data: dict, vector_name:str="documents"):
"""Triggered from a message on a Cloud Pub/Sub topic.
Args:
data JSON
"""
#hash = data['message']['data']
message_data = base64.b64decode(data['message']['data']).decode('utf-8')
attributes = data['message'].get('attributes', {})
messageId = data['message'].get('messageId')
publishTime = data['message'].get('publishTime')
logging.info(f"data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}")
logging.info(f"data_to_embed_pubsub data: {message_data}")
# pubsub from a Google Cloud Storage push topic
if attributes.get("eventType", None) is not None and attributes.get("payloadFormat", None) is not None:
eventType = attributes.get("eventType")
payloadFormat = attributes.get("payloadFormat")
if eventType == "OBJECT_FINALIZE" and payloadFormat == "JSON_API_V1":
logging.info("Got valid event from Google Cloud Storage")
the_object = attributes.get("objectId", None)
if the_object is None:
logging.info("No object found")
return attributes
if the_object.endswith("/"):
logging.info("GCS object is a directory only")
return attributes
# https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations
message_data = 'gs://' + attributes.get("bucketId") + '/' + the_object
if '/' in the_object:
bucket_vector_name = the_object.split('/')[0]
if len(bucket_vector_name) > 0 and vector_name != bucket_vector_name:
logging.info(f"Overwriting vector_name {vector_name} with {bucket_vector_name}")
vector_name = bucket_vector_name
attributes["attrs"] = f"namespace:{vector_name}"
logging.info(f"Constructed message_data: {message_data}")
metadata = attributes
logging.info(f"Found metadata in pubsub: {metadata}")
chunks = []
if message_data.startswith('"gs://'):
message_data = message_data.strip('\"')
if message_data.startswith("gs://"):
logging.info("Detected gs://")
bucket_name, file_name = message_data[5:].split("/", 1)
# Create a client
storage_client = storage.Client()
# Download the file from GCS
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(file_name)
file_name=pathlib.Path(file_name)
with tempfile.TemporaryDirectory() as temp_dir:
tmp_file_path = os.path.join(temp_dir, file_name.name)
blob.download_to_filename(tmp_file_path)
the_metadata = {
"source": message_data,
"type": "file_load_gcs",
"bucket_name": bucket_name
}
metadata.update(the_metadata)
docs = read_file_to_document(tmp_file_path, metadata=metadata)
chunks = chunk_doc_to_docs(docs, file_name.suffix)
logging.info(f"Split {file_name} into {len(chunks)} chunks")
elif message_data.startswith("http"):
logging.info(f"Got http message: {message_data}")
# just in case, extract the URL again
urls = extract_urls(message_data)
docs = []
for url in urls:
metadata["source"] = url
metadata["url"] = url
metadata["type"] = "url_load"
doc = read_url_to_document(url, metadata=metadata)
docs.extend(doc)
chunks = chunk_doc_to_docs(docs)
logging.info(f"Split {url} into {len(chunks)} chunks")
else:
logging.info("No gs:// detected")
the_json = json.loads(message_data)
the_metadata = the_json.get("metadata", {})
metadata.update(the_metadata)
the_content = the_json.get("page_content", None)
        if metadata.get("source", None) is None:
metadata["source"] = "No source embedded"
if the_content is None:
logging.info("No content found")
return {"metadata": "No content found"}
docs = [Document(page_content=the_content, metadata=metadata)]
publish_if_urls(the_content, vector_name)
chunks = chunk_doc_to_docs(docs)
logging.info(f"Split content into {len(chunks)} chunks")
publish_chunks(chunks, vector_name=vector_name)
logging.info(f"data_to_embed_pubsub published chunks with metadata: {metadata}")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"pubsub_state_messages")
pubsub_manager.publish_message(f"pubsub_chunk - Added doc with metadata: {metadata} to {vector_name}")
return metadata
def publish_if_urls(the_content, vector_name):
"""
Extracts URLs and puts them in a queue for processing on PubSub
"""
if contains_url(the_content):
logging.info("Detected http://")
urls = extract_urls(the_content)
for url in urls:
publish_text(url, vector_name)
def publish_chunks(chunks: list[Document], vector_name: str):
logging.info("Publishing chunks to embed_chunk")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"embed_chunk_{vector_name}")
sub_name = f"pubsub_chunk_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_chunk_to_store/{vector_name}")
setup_database(vector_name)
for chunk in chunks:
# Convert chunk to string, as Pub/Sub messages must be strings or bytes
chunk_str = chunk.json()
pubsub_manager.publish_message(chunk_str)
def publish_text(text:str, vector_name: str):
logging.info(f"Publishing text to app_to_pubsub_{vector_name}")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}")
sub_name = f"pubsub_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_chunk_to_store/{vector_name}")
setup_database(vector_name)
pubsub_manager.publish_message(text)
def delete_source(source:str, vector_name:str):
logging.info(f"Deleting source: {source} from {vector_name}")
delete_row_from_source(source, vector_name)
logging.info(f"Deleted source: {source} from {vector_name}")
def return_sources_last24_(vector_name:str):
logging.info(f"Returning sources last 24")
rows = return_sources_last24(vector_name)
return rows
| [
"langchain.document_loaders.unstructured.UnstructuredFileLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.text_splitter.PythonCodeTextSplitter",
"langchain.document_loaders.unstructured.UnstructuredAPIFileLoader",
"langchain.document_loaders.UnstructuredURLLoader",
"langchain.text_splitter.MarkdownTextSplitter",
"langchain.schema.Document"
] | [((719, 732), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (730, 732), False, 'from dotenv import load_dotenv\n'), ((784, 892), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (794, 892), False, 'import os, shutil, json, re\n'), ((1015, 1123), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (1025, 1123), False, 'import os, shutil, json, re\n'), ((1242, 1266), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1255, 1266), False, 'import os, shutil, json, re\n'), ((1293, 1320), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1309, 1320), False, 'import os, shutil, json, re\n'), ((1336, 1378), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (1348, 1378), False, 'import os, shutil, json, re\n'), ((1383, 1419), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (1398, 1419), False, 'import os, shutil, json, re\n'), ((1869, 1885), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (1883, 1885), False, 'from google.cloud import storage\n'), ((2200, 2243), 'logging.info', 'logging.info', (['f"""Bucket_name: {bucket_name}"""'], {}), "(f'Bucket_name: {bucket_name}')\n", (2212, 2243), False, 'import logging\n'), ((2306, 2329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2327, 2329), False, 'import datetime\n'), ((2853, 2939), 'logging.info', 'logging.info', (['f"""File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}"""'], {}), "(\n f'File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}')\n", (2865, 2939), False, 'import logging\n'), ((3058, 3129), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (3071, 3129), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((3584, 3617), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': '[url]'}), '(urls=[url])\n', (3605, 3617), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((3748, 3799), 'logging.info', 'logging.info', (['f"""UnstructuredURLLoader docs: {docs}"""'], {}), "(f'UnstructuredURLLoader docs: {docs}')\n", (3760, 3799), False, 'import logging\n'), ((6112, 6212), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (6156, 6212), True, 'import langchain.text_splitter as text_splitter\n'), ((7229, 7342), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}"""'], {}), "(\n f'data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}'\n )\n", (7241, 7342), False, 'import logging\n'), ((7337, 7395), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub data: {message_data}"""'], {}), "(f'data_to_embed_pubsub data: 
{message_data}')\n", (7349, 7395), False, 'import logging\n'), ((8835, 8888), 'logging.info', 'logging.info', (['f"""Found metadata in pubsub: {metadata}"""'], {}), "(f'Found metadata in pubsub: {metadata}')\n", (8847, 8888), False, 'import logging\n'), ((11324, 11409), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub published chunks with metadata: {metadata}"""'], {}), "(f'data_to_embed_pubsub published chunks with metadata: {metadata}'\n )\n", (11336, 11409), False, 'import logging\n'), ((11426, 11491), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""pubsub_state_messages"""'}), "(vector_name, pubsub_topic=f'pubsub_state_messages')\n", (11439, 11491), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12019, 12067), 'logging.info', 'logging.info', (['"""Publishing chunks to embed_chunk"""'], {}), "('Publishing chunks to embed_chunk')\n", (12031, 12067), False, 'import logging\n'), ((12094, 12163), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""embed_chunk_{vector_name}"""'}), "(vector_name, pubsub_topic=f'embed_chunk_{vector_name}')\n", (12107, 12163), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12749, 12812), 'logging.info', 'logging.info', (['f"""Publishing text to app_to_pubsub_{vector_name}"""'], {}), "(f'Publishing text to app_to_pubsub_{vector_name}')\n", (12761, 12812), False, 'import logging\n'), ((12834, 12905), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (12847, 12905), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((13331, 13392), 'logging.info', 'logging.info', (['f"""Deleting source: {source} from {vector_name}"""'], {}), "(f'Deleting source: {source} from {vector_name}')\n", (13343, 13392), False, 'import logging\n'), ((13445, 13505), 'logging.info', 'logging.info', (['f"""Deleted source: {source} from {vector_name}"""'], {}), "(f'Deleted source: {source} from {vector_name}')\n", (13457, 13505), False, 'import logging\n'), ((13557, 13599), 'logging.info', 'logging.info', (['f"""Returning sources last 24"""'], {}), "(f'Returning sources last 24')\n", (13569, 13599), False, 'import logging\n'), ((1949, 1978), 'os.getenv', 'os.getenv', (['"""GCS_BUCKET"""', 'None'], {}), "('GCS_BUCKET', None)\n", (1958, 1978), False, 'import os, shutil, json, re\n'), ((4044, 4107), 'logging.info', 'logging.info', (['f"""Sending {gs_file} to UnstructuredAPIFileLoader"""'], {}), "(f'Sending {gs_file} to UnstructuredAPIFileLoader')\n", (4056, 4107), False, 'import logging\n'), ((4125, 4200), 'langchain.document_loaders.unstructured.UnstructuredAPIFileLoader', 'UnstructuredAPIFileLoader', (['gs_file'], {'mode': '"""elements"""', 'api_key': '"""FAKE_API_KEY"""'}), "(gs_file, mode='elements', api_key='FAKE_API_KEY')\n", (4150, 4200), False, 'from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader\n'), ((5632, 5685), 'logging.info', 'logging.info', (['f"""doc_content: {doc.page_content[:30]}"""'], {}), "(f'doc_content: {doc.page_content[:30]}')\n", (5644, 5685), False, 'import logging\n'), ((5976, 6014), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (6012, 6014), True, 'import langchain.text_splitter as text_splitter\n'), ((9047, 9077), 'logging.info', 'logging.info', (['"""Detected gs://"""'], {}), "('Detected 
gs://')\n", (9059, 9077), False, 'import logging\n'), ((9194, 9210), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (9208, 9210), False, 'from google.cloud import storage\n'), ((9362, 9385), 'pathlib.Path', 'pathlib.Path', (['file_name'], {}), '(file_name)\n', (9374, 9385), False, 'import pathlib\n'), ((11795, 11827), 'logging.info', 'logging.info', (['"""Detected http://"""'], {}), "('Detected http://')\n", (11807, 11827), False, 'import logging\n'), ((1691, 1712), 'hashlib.sha1', 'hashlib.sha1', (['content'], {}), '(content)\n', (1703, 1712), False, 'import hashlib\n'), ((2518, 2544), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2534, 2544), False, 'import os, shutil, json, re\n'), ((4379, 4452), 'logging.info', 'logging.info', (['f"""Loaded docs for {gs_file} from UnstructuredAPIFileLoader"""'], {}), "(f'Loaded docs for {gs_file} from UnstructuredAPIFileLoader')\n", (4391, 4452), False, 'import logging\n'), ((6059, 6095), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (6093, 6095), True, 'import langchain.text_splitter as text_splitter\n'), ((7009, 7050), 'base64.b64decode', 'base64.b64decode', (["data['message']['data']"], {}), "(data['message']['data'])\n", (7025, 7050), False, 'import base64\n'), ((7751, 7808), 'logging.info', 'logging.info', (['"""Got valid event from Google Cloud Storage"""'], {}), "('Got valid event from Google Cloud Storage')\n", (7763, 7808), False, 'import logging\n'), ((8741, 8798), 'logging.info', 'logging.info', (['f"""Constructed message_data: {message_data}"""'], {}), "(f'Constructed message_data: {message_data}')\n", (8753, 8798), False, 'import logging\n'), ((9400, 9429), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9427, 9429), False, 'import tempfile\n'), ((9471, 9509), 'os.path.join', 'os.path.join', (['temp_dir', 'file_name.name'], {}), '(temp_dir, file_name.name)\n', (9483, 9509), False, 'import os, shutil, json, re\n'), ((10036, 10085), 'logging.info', 'logging.info', (['f"""Got http message: {message_data}"""'], {}), "(f'Got http message: {message_data}')\n", (10048, 10085), False, 'import logging\n'), ((10548, 10581), 'logging.info', 'logging.info', (['"""No gs:// detected"""'], {}), "('No gs:// detected')\n", (10560, 10581), False, 'import logging\n'), ((10610, 10634), 'json.loads', 'json.loads', (['message_data'], {}), '(message_data)\n', (10620, 10634), False, 'import os, shutil, json, re\n'), ((1573, 1592), 'hashlib.sha1', 'hashlib.sha1', (['bytes'], {}), '(bytes)\n', (1585, 1592), False, 'import hashlib\n'), ((4643, 4693), 'logging.info', 'logging.info', (['"""trying locally via .txt conversion"""'], {}), "('trying locally via .txt conversion')\n", (4655, 4693), False, 'import logging\n'), ((6705, 6761), 'langchain.schema.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (6713, 6761), False, 'from langchain.schema import Document\n'), ((7919, 7950), 'logging.info', 'logging.info', (['"""No object found"""'], {}), "('No object found')\n", (7931, 7950), False, 'import logging\n'), ((8055, 8101), 'logging.info', 'logging.info', (['"""GCS object is a directory only"""'], {}), "('GCS object is a directory only')\n", (8067, 8101), False, 'import logging\n'), ((10935, 10967), 'logging.info', 'logging.info', (['"""No content found"""'], {}), "('No content found')\n", (10947, 10967), False, 'import logging\n'), 
((11045, 11098), 'langchain.schema.Document', 'Document', ([], {'page_content': 'the_content', 'metadata': 'metadata'}), '(page_content=the_content, metadata=metadata)\n', (11053, 11098), False, 'from langchain.schema import Document\n'), ((4872, 4921), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {'mode': '"""elements"""'}), "(txt_file, mode='elements')\n", (4894, 4921), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((8533, 8618), 'logging.info', 'logging.info', (['f"""Overwriting vector_name {vector_name} with {bucket_vector_name}"""'], {}), "(f'Overwriting vector_name {vector_name} with {bucket_vector_name}'\n )\n", (8545, 8618), False, 'import logging\n'), ((5335, 5359), 'os.path.exists', 'os.path.exists', (['txt_file'], {}), '(txt_file)\n', (5349, 5359), False, 'import os, shutil, json, re\n'), ((5381, 5400), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (5390, 5400), False, 'import os, shutil, json, re\n')] |
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
cast,
)
import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.load.dump import dumpd, dumps
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValue
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import (
ChatGeneration,
ChatResult,
LLMResult,
PromptValue,
RunInfo,
)
from langchain.schema.language_model import BaseLanguageModel, LanguageModelInput
from langchain.schema.messages import (
AIMessage,
BaseMessage,
BaseMessageChunk,
HumanMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.schema.runnable import RunnableConfig
def _get_verbosity() -> bool:
return langchain.verbose
class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC):
"""Base class for Chat models."""
cache: Optional[bool] = None
"""Whether to cache the response."""
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to add to the run trace."""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""Callback manager to add to the run trace."""
tags: Optional[List[str]] = Field(default=None, exclude=True)
"""Tags to add to the run trace."""
metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True)
"""Metadata to add to the run trace."""
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
# --- Runnable methods ---
def _convert_input(self, input: LanguageModelInput) -> PromptValue:
if isinstance(input, PromptValue):
return input
elif isinstance(input, str):
return StringPromptValue(text=input)
elif isinstance(input, list):
return ChatPromptValue(messages=input)
else:
raise ValueError(
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
def invoke(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> BaseMessageChunk:
config = config or {}
return cast(
BaseMessageChunk,
cast(
ChatGeneration,
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
**kwargs,
).generations[0][0],
).message,
)
async def ainvoke(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> BaseMessageChunk:
if type(self)._agenerate == BaseChatModel._agenerate:
# model doesn't implement async generation, so use default implementation
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.invoke, input, config, stop=stop, **kwargs)
)
config = config or {}
llm_result = await self.agenerate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
**kwargs,
)
return cast(
BaseMessageChunk, cast(ChatGeneration, llm_result.generations[0][0]).message
)
def stream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[BaseMessageChunk]:
if type(self)._stream == BaseChatModel._stream:
# model doesn't implement streaming, so use default implementation
yield self.invoke(input, config=config, stop=stop, **kwargs)
else:
config = config or {}
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop, **kwargs}
callback_manager = CallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
config.get("metadata"),
self.metadata,
)
(run_manager,) = callback_manager.on_chat_model_start(
dumpd(self), [messages], invocation_params=params, options=options
)
try:
message: Optional[BaseMessageChunk] = None
for chunk in self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
yield chunk.message
if message is None:
message = chunk.message
else:
message += chunk.message
assert message is not None
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
else:
run_manager.on_llm_end(
LLMResult(generations=[[ChatGeneration(message=message)]]),
)
async def astream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> AsyncIterator[BaseMessageChunk]:
if type(self)._astream == BaseChatModel._astream:
# model doesn't implement streaming, so use default implementation
yield self.invoke(input, config=config, stop=stop, **kwargs)
else:
config = config or {}
messages = self._convert_input(input).to_messages()
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop, **kwargs}
callback_manager = AsyncCallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
config.get("metadata"),
self.metadata,
)
(run_manager,) = await callback_manager.on_chat_model_start(
dumpd(self), [messages], invocation_params=params, options=options
)
try:
message: Optional[BaseMessageChunk] = None
async for chunk in self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
yield chunk.message
if message is None:
message = chunk.message
else:
message += chunk.message
assert message is not None
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e)
raise e
else:
await run_manager.on_llm_end(
LLMResult(generations=[[ChatGeneration(message=message)]]),
)
# --- Custom methods ---
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
return {}
def _get_invocation_params(
self,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> dict:
params = self.dict()
params["stop"] = stop
return {**params, **kwargs}
def _get_llm_string(self, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
if self.lc_serializable:
params = {**kwargs, **{"stop": stop}}
param_string = str(sorted([(k, v) for k, v in params.items()]))
llm_string = dumps(self)
return llm_string + "---" + param_string
else:
params = self._get_invocation_params(stop=stop, **kwargs)
params = {**params, **kwargs}
return str(sorted([(k, v) for k, v in params.items()]))
def generate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
"""Top Level call"""
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop}
callback_manager = CallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
run_managers = callback_manager.on_chat_model_start(
dumpd(self), messages, invocation_params=params, options=options
)
results = []
for i, m in enumerate(messages):
try:
results.append(
self._generate_with_cache(
m,
stop=stop,
run_manager=run_managers[i] if run_managers else None,
**kwargs,
)
)
except (KeyboardInterrupt, Exception) as e:
if run_managers:
run_managers[i].on_llm_error(e)
raise e
flattened_outputs = [
LLMResult(generations=[res.generations], llm_output=res.llm_output)
for res in results
]
llm_output = self._combine_llm_outputs([res.llm_output for res in results])
generations = [res.generations for res in results]
output = LLMResult(generations=generations, llm_output=llm_output)
if run_managers:
run_infos = []
for manager, flattened_output in zip(run_managers, flattened_outputs):
manager.on_llm_end(flattened_output)
run_infos.append(RunInfo(run_id=manager.run_id))
output.run = run_infos
return output
async def agenerate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
"""Top Level call"""
params = self._get_invocation_params(stop=stop, **kwargs)
options = {"stop": stop}
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
run_managers = await callback_manager.on_chat_model_start(
dumpd(self), messages, invocation_params=params, options=options
)
results = await asyncio.gather(
*[
self._agenerate_with_cache(
m,
stop=stop,
run_manager=run_managers[i] if run_managers else None,
**kwargs,
)
for i, m in enumerate(messages)
],
return_exceptions=True,
)
exceptions = []
for i, res in enumerate(results):
if isinstance(res, Exception):
if run_managers:
await run_managers[i].on_llm_error(res)
exceptions.append(res)
if exceptions:
if run_managers:
await asyncio.gather(
*[
run_manager.on_llm_end(
LLMResult(
generations=[res.generations], llm_output=res.llm_output
)
)
for run_manager, res in zip(run_managers, results)
if not isinstance(res, Exception)
]
)
raise exceptions[0]
flattened_outputs = [
LLMResult(generations=[res.generations], llm_output=res.llm_output)
for res in results
]
llm_output = self._combine_llm_outputs([res.llm_output for res in results])
generations = [res.generations for res in results]
output = LLMResult(generations=generations, llm_output=llm_output)
await asyncio.gather(
*[
run_manager.on_llm_end(flattened_output)
for run_manager, flattened_output in zip(
run_managers, flattened_outputs
)
]
)
if run_managers:
output.run = [
RunInfo(run_id=run_manager.run_id) for run_manager in run_managers
]
return output
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = [p.to_messages() for p in prompts]
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
prompt_messages = [p.to_messages() for p in prompts]
return await self.agenerate(
prompt_messages, stop=stop, callbacks=callbacks, **kwargs
)
def _generate_with_cache(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if new_arg_supported:
return self._generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
return self._generate(messages, stop=stop, **kwargs)
else:
llm_string = self._get_llm_string(stop=stop, **kwargs)
prompt = dumps(messages)
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
return ChatResult(generations=cache_val)
else:
if new_arg_supported:
result = self._generate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
result = self._generate(messages, stop=stop, **kwargs)
langchain.llm_cache.update(prompt, llm_string, result.generations)
return result
async def _agenerate_with_cache(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if new_arg_supported:
return await self._agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
return await self._agenerate(messages, stop=stop, **kwargs)
else:
llm_string = self._get_llm_string(stop=stop, **kwargs)
prompt = dumps(messages)
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
return ChatResult(generations=cache_val)
else:
if new_arg_supported:
result = await self._agenerate(
messages, stop=stop, run_manager=run_manager, **kwargs
)
else:
result = await self._agenerate(messages, stop=stop, **kwargs)
langchain.llm_cache.update(prompt, llm_string, result.generations)
return result
@abstractmethod
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call"""
raise NotImplementedError()
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
raise NotImplementedError()
def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
raise NotImplementedError()
def __call__(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseMessage:
generation = self.generate(
[messages], stop=stop, callbacks=callbacks, **kwargs
).generations[0][0]
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
async def _call_async(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> BaseMessage:
result = await self.agenerate(
[messages], stop=stop, callbacks=callbacks, **kwargs
)
generation = result.generations[0][0]
if isinstance(generation, ChatGeneration):
return generation.message
else:
raise ValueError("Unexpected generation type")
def call_as_llm(
self, message: str, stop: Optional[List[str]] = None, **kwargs: Any
) -> str:
return self.predict(message, stop=stop, **kwargs)
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
return result.content
def predict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(messages, stop=_stop, **kwargs)
async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = await self._call_async(
[HumanMessage(content=text)], stop=_stop, **kwargs
)
return result.content
async def apredict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(messages, stop=_stop, **kwargs)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {}
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of chat model."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
class SimpleChatModel(BaseChatModel):
"""Simple Chat Model."""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs)
message = AIMessage(content=output_str)
generation = ChatGeneration(message=message)
return ChatResult(generations=[generation])
@abstractmethod
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Simpler interface."""
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
func = partial(
self._generate, messages, stop=stop, run_manager=run_manager, **kwargs
)
return await asyncio.get_event_loop().run_in_executor(None, func)
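# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical concrete model showing how the classes above are
# meant to be used: subclass SimpleChatModel, implement `_call` and
# `_llm_type`, and the inherited `predict`/`generate`/callback plumbing comes
# for free. `EchoChatModel` is an invented name used for illustration only.
class EchoChatModel(SimpleChatModel):
    """Toy chat model that echoes the most recent message back."""
    @property
    def _llm_type(self) -> str:
        return "echo-chat-model"
    def _call(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        # Simply return the content of the last message in the conversation.
        return messages[-1].content
if __name__ == "__main__":
    # Hedged usage example; relies only on names defined or imported in this file.
    model = EchoChatModel()
    print(model.predict("hello"))  # expected output: "hello"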
| [
"langchain.llm_cache.lookup",
"langchain.schema.messages.HumanMessage",
"langchain.prompts.base.StringPromptValue",
"langchain.schema.messages.AIMessage",
"langchain.schema.ChatGeneration",
"langchain.load.dump.dumps",
"langchain.schema.RunInfo",
"langchain.llm_cache.update",
"langchain.pydantic_v1.Field",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.ChatResult",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.pydantic_v1.root_validator",
"langchain.schema.LLMResult",
"langchain.load.dump.dumpd",
"langchain.prompts.chat.ChatPromptValue"
] | [((1364, 1401), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1369, 1401), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1475, 1508), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1480, 1508), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1608, 1641), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1613, 1641), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1726, 1759), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1731, 1759), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1841, 1874), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (1846, 1874), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((1925, 1941), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (1939, 1941), False, 'from langchain.pydantic_v1 import Field, root_validator\n'), ((9835, 9947), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose, tags,\n self.tags, metadata, self.metadata)\n', (9860, 9947), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((11036, 11093), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (11045, 11093), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((11869, 11986), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose,\n tags, self.tags, metadata, self.metadata)\n', (11899, 11986), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((13721, 13778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (13730, 13778), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22871, 22900), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {'content': 'output_str'}), '(content=output_str)\n', (22880, 22900), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((22922, 22953), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (22936, 22953), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((22969, 23005), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': '[generation]'}), '(generations=[generation])\n', (22979, 23005), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, 
PromptValue, RunInfo\n'), ((23517, 23596), 'functools.partial', 'partial', (['self._generate', 'messages'], {'stop': 'stop', 'run_manager': 'run_manager'}), '(self._generate, messages, stop=stop, run_manager=run_manager, **kwargs)\n', (23524, 23596), False, 'from functools import partial\n'), ((2132, 2234), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2145, 2234), False, 'import warnings\n'), ((9118, 9129), 'langchain.load.dump.dumps', 'dumps', (['self'], {}), '(self)\n', (9123, 9129), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10112, 10123), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (10117, 10123), False, 'from langchain.load.dump import dumpd, dumps\n'), ((10767, 10834), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (10776, 10834), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13452, 13519), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13461, 13519), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16027, 16042), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (16032, 16042), False, 'from langchain.load.dump import dumpd, dumps\n'), ((16067, 16113), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (16093, 16113), False, 'import langchain\n'), ((17747, 17762), 'langchain.load.dump.dumps', 'dumps', (['messages'], {}), '(messages)\n', (17752, 17762), False, 'from langchain.load.dump import dumpd, dumps\n'), ((17787, 17833), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (17813, 17833), False, 'import langchain\n'), ((2713, 2742), 'langchain.prompts.base.StringPromptValue', 'StringPromptValue', ([], {'text': 'input'}), '(text=input)\n', (2730, 2742), False, 'from langchain.prompts.base import StringPromptValue\n'), ((4629, 4679), 'typing.cast', 'cast', (['ChatGeneration', 'llm_result.generations[0][0]'], {}), '(ChatGeneration, llm_result.generations[0][0])\n', (4633, 4679), False, 'from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Sequence, cast\n'), ((5755, 5766), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (5760, 5766), False, 'from langchain.load.dump import dumpd, dumps\n'), ((12158, 12169), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (12163, 12169), False, 'from langchain.load.dump import dumpd, dumps\n'), ((14101, 14135), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (14108, 14135), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16181, 16214), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (16191, 16214), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((16530, 16596), 'langchain.llm_cache.update', 
'langchain.llm_cache.update', (['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (16556, 16596), False, 'import langchain\n'), ((17901, 17934), 'langchain.schema.ChatResult', 'ChatResult', ([], {'generations': 'cache_val'}), '(generations=cache_val)\n', (17911, 17934), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((18264, 18330), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result.generations'], {}), '(prompt, llm_string, result.generations)\n', (18290, 18330), False, 'import langchain\n'), ((20898, 20924), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (20910, 20924), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((2800, 2831), 'langchain.prompts.chat.ChatPromptValue', 'ChatPromptValue', ([], {'messages': 'input'}), '(messages=input)\n', (2815, 2831), False, 'from langchain.prompts.chat import ChatPromptValue\n'), ((4200, 4256), 'functools.partial', 'partial', (['self.invoke', 'input', 'config'], {'stop': 'stop'}), '(self.invoke, input, config, stop=stop, **kwargs)\n', (4207, 4256), False, 'from functools import partial\n'), ((7657, 7668), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (7662, 7668), False, 'from langchain.load.dump import dumpd, dumps\n'), ((11315, 11345), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'manager.run_id'}), '(run_id=manager.run_id)\n', (11322, 11345), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((15194, 15227), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (15211, 15227), False, 'import inspect\n'), ((16899, 16933), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (16916, 16933), False, 'import inspect\n'), ((21574, 21600), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'text'}), '(content=text)\n', (21586, 21600), False, 'from langchain.schema.messages import AIMessage, BaseMessage, BaseMessageChunk, HumanMessage\n'), ((23640, 23664), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (23662, 23664), False, 'import asyncio\n'), ((4134, 4160), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (4158, 4160), False, 'import asyncio\n'), ((6521, 6552), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (6535, 6552), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((13049, 13116), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': '[res.generations]', 'llm_output': 'res.llm_output'}), '(generations=[res.generations], llm_output=res.llm_output)\n', (13058, 13116), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n'), ((8442, 8473), 'langchain.schema.ChatGeneration', 'ChatGeneration', ([], {'message': 'message'}), '(message=message)\n', (8456, 8473), False, 'from langchain.schema import ChatGeneration, ChatResult, LLMResult, PromptValue, RunInfo\n')] |
import os
from transformers import AutoTokenizer
from configs import (
EMBEDDING_MODEL,
KB_ROOT_PATH,
CHUNK_SIZE,
OVERLAP_SIZE,
ZH_TITLE_ENHANCE,
logger,
log_verbose,
text_splitter_dict,
LLM_MODEL,
TEXT_SPLITTER_NAME,
)
import importlib
from text_splitter import zh_title_enhance as func_zh_title_enhance
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from pathlib import Path
import json
from concurrent.futures import ThreadPoolExecutor
from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config
import io
from typing import List, Union, Callable, Dict, Optional, Tuple, Generator
import chardet
def validate_kb_name(knowledge_base_id: str) -> bool:
    # Check for unexpected characters or path-traversal keywords
if "../" in knowledge_base_id:
return False
return True
def get_kb_path(knowledge_base_name: str):
return os.path.join(KB_ROOT_PATH, knowledge_base_name)
def get_doc_path(knowledge_base_name: str):
return os.path.join(get_kb_path(knowledge_base_name), "content")
def get_vs_path(knowledge_base_name: str, vector_name: str):
return os.path.join(get_kb_path(knowledge_base_name), vector_name)
def get_file_path(knowledge_base_name: str, doc_name: str):
return os.path.join(get_doc_path(knowledge_base_name), doc_name)
def list_kbs_from_folder():
return [f for f in os.listdir(KB_ROOT_PATH)
if os.path.isdir(os.path.join(KB_ROOT_PATH, f))]
def list_files_from_folder(kb_name: str):
doc_path = get_doc_path(kb_name)
return [file for file in os.listdir(doc_path)
if os.path.isfile(os.path.join(doc_path, file))]
def load_embeddings(model: str = EMBEDDING_MODEL, device: str = embedding_device()):
'''
    Load embeddings from the cache, which avoids competing loads across multiple threads.
'''
from server.knowledge_base.kb_cache.base import embeddings_pool
return embeddings_pool.load_embeddings(model=model, device=device)
LOADER_DICT = {"UnstructuredHTMLLoader": ['.html'],
"UnstructuredMarkdownLoader": ['.md'],
"CustomJSONLoader": [".json"],
"CSVLoader": [".csv"],
"RapidOCRPDFLoader": [".pdf"],
"RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'],
"UnstructuredFileLoader": ['.eml', '.msg', '.rst',
'.rtf', '.txt', '.xml',
'.docx', '.epub', '.odt',
'.ppt', '.pptx', '.tsv'],
}
SUPPORTED_EXTS = [ext for sublist in LOADER_DICT.values() for ext in sublist]
class CustomJSONLoader(langchain.document_loaders.JSONLoader):
'''
    A replacement for langchain's JSONLoader, which requires jq and is inconvenient to use on Windows. Targets langchain==0.0.286.
'''
def __init__(
self,
file_path: Union[str, Path],
content_key: Optional[str] = None,
metadata_func: Optional[Callable[[Dict, Dict], Dict]] = None,
text_content: bool = True,
json_lines: bool = False,
):
"""Initialize the JSONLoader.
Args:
file_path (Union[str, Path]): The path to the JSON or JSON Lines file.
content_key (str): The key to use to extract the content from the JSON if
results to a list of objects (dict).
metadata_func (Callable[Dict, Dict]): A function that takes in the JSON
object extracted by the jq_schema and the default metadata and returns
a dict of the updated metadata.
text_content (bool): Boolean flag to indicate whether the content is in
string format, default to True.
json_lines (bool): Boolean flag to indicate whether the input is in
JSON Lines format.
"""
self.file_path = Path(file_path).resolve()
self._content_key = content_key
self._metadata_func = metadata_func
self._text_content = text_content
self._json_lines = json_lines
def _parse(self, content: str, docs: List[Document]) -> None:
"""Convert given content to documents."""
data = json.loads(content)
# Perform some validation
# This is not a perfect validation, but it should catch most cases
# and prevent the user from getting a cryptic error later on.
if self._content_key is not None:
self._validate_content_key(data)
if self._metadata_func is not None:
self._validate_metadata_func(data)
for i, sample in enumerate(data, len(docs) + 1):
text = self._get_text(sample=sample)
metadata = self._get_metadata(
sample=sample, source=str(self.file_path), seq_num=i
)
docs.append(Document(page_content=text, metadata=metadata))
langchain.document_loaders.CustomJSONLoader = CustomJSONLoader
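# --- Illustrative sketch (not part of the original file) ---
# Hypothetical usage of CustomJSONLoader, assuming "data.json" holds a JSON
# array of objects such as [{"text": "...", "id": 1}, ...]; the loader is
# driven through the JSONLoader interface it inherits from langchain:
#
#   loader = CustomJSONLoader("data.json", content_key="text")
#   docs = loader.load()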
def get_LoaderClass(file_extension):
for LoaderClass, extensions in LOADER_DICT.items():
if file_extension in extensions:
return LoaderClass
# Shared vectorization logic pulled out of KnowledgeFile; once langchain supports in-memory files, non-disk content can be vectorized as well
def get_loader(loader_name: str, file_path_or_content: Union[str, bytes, io.StringIO, io.BytesIO]):
'''
    Return a document loader based on loader_name and the file path or content.
'''
try:
if loader_name in ["RapidOCRPDFLoader", "RapidOCRLoader"]:
document_loaders_module = importlib.import_module('document_loaders')
else:
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, loader_name)
except Exception as e:
msg = f"为文件{file_path_or_content}查找加载器{loader_name}时出错:{e}"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, "UnstructuredFileLoader")
if loader_name == "UnstructuredFileLoader":
loader = DocumentLoader(file_path_or_content, autodetect_encoding=True)
elif loader_name == "CSVLoader":
        # Auto-detect the file encoding to keep the langchain loader from raising encoding errors
with open(file_path_or_content, 'rb') as struct_file:
encode_detect = chardet.detect(struct_file.read())
if encode_detect:
loader = DocumentLoader(file_path_or_content, encoding=encode_detect["encoding"])
else:
loader = DocumentLoader(file_path_or_content, encoding="utf-8")
elif loader_name == "JSONLoader":
loader = DocumentLoader(file_path_or_content, jq_schema=".", text_content=False)
elif loader_name == "CustomJSONLoader":
loader = DocumentLoader(file_path_or_content, text_content=False)
elif loader_name == "UnstructuredMarkdownLoader":
loader = DocumentLoader(file_path_or_content, mode="elements")
elif loader_name == "UnstructuredHTMLLoader":
loader = DocumentLoader(file_path_or_content, mode="elements")
else:
loader = DocumentLoader(file_path_or_content)
return loader
def make_text_splitter(
splitter_name: str = TEXT_SPLITTER_NAME,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
llm_model: str = LLM_MODEL,
):
"""
    Return the specific text splitter selected by the given parameters.
"""
splitter_name = splitter_name or "SpacyTextSplitter"
try:
if splitter_name == "MarkdownHeaderTextSplitter": # MarkdownHeaderTextSplitter特殊判定
headers_to_split_on = text_splitter_dict[splitter_name]['headers_to_split_on']
text_splitter = langchain.text_splitter.MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on)
else:
            try: ## prefer a user-defined text_splitter first
text_splitter_module = importlib.import_module('text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
            except: ## otherwise fall back to langchain's text_splitter
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
            if text_splitter_dict[splitter_name]["source"] == "tiktoken": ## load from tiktoken
try:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
            elif text_splitter_dict[splitter_name]["source"] == "huggingface": ## load from huggingface
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "":
config = get_model_worker_config(llm_model)
text_splitter_dict[splitter_name]["tokenizer_name_or_path"] = \
config.get("model_path")
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "gpt2":
from transformers import GPT2TokenizerFast
from langchain.text_splitter import CharacterTextSplitter
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
                else: ## load by character length
tokenizer = AutoTokenizer.from_pretrained(
text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
trust_remote_code=True)
text_splitter = TextSplitter.from_huggingface_tokenizer(
tokenizer=tokenizer,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
else:
try:
text_splitter = TextSplitter(
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except Exception as e:
print(e)
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter")
text_splitter = TextSplitter(chunk_size=250, chunk_overlap=50)
return text_splitter
class KnowledgeFile:
def __init__(
self,
filename: str,
knowledge_base_name: str
):
'''
        Corresponds to a file in the knowledge-base directory; the file must exist on disk before vectorization and related operations can run.
'''
self.kb_name = knowledge_base_name
self.filename = filename
self.ext = os.path.splitext(filename)[-1].lower()
if self.ext not in SUPPORTED_EXTS:
raise ValueError(f"暂未支持的文件格式 {self.ext}")
self.filepath = get_file_path(knowledge_base_name, filename)
self.docs = None
self.splited_docs = None
self.document_loader_name = get_LoaderClass(self.ext)
self.text_splitter_name = TEXT_SPLITTER_NAME
def file2docs(self, refresh: bool=False):
if self.docs is None or refresh:
logger.info(f"{self.document_loader_name} used for {self.filepath}")
loader = get_loader(self.document_loader_name, self.filepath)
self.docs = loader.load()
return self.docs
def docs2texts(
self,
docs: List[Document] = None,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
docs = docs or self.file2docs(refresh=refresh)
if not docs:
return []
if self.ext not in [".csv"]:
if text_splitter is None:
text_splitter = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
if self.text_splitter_name == "MarkdownHeaderTextSplitter":
docs = text_splitter.split_text(docs[0].page_content)
for doc in docs:
                    # if the document carries metadata
if doc.metadata:
doc.metadata["source"] = os.path.basename(self.filepath)
else:
docs = text_splitter.split_documents(docs)
print(f"文档切分示例:{docs[0]}")
if zh_title_enhance:
docs = func_zh_title_enhance(docs)
self.splited_docs = docs
return self.splited_docs
def file2text(
self,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
if self.splited_docs is None or refresh:
docs = self.file2docs()
self.splited_docs = self.docs2texts(docs=docs,
zh_title_enhance=zh_title_enhance,
refresh=refresh,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
text_splitter=text_splitter)
return self.splited_docs
def file_exist(self):
return os.path.isfile(self.filepath)
def get_mtime(self):
return os.path.getmtime(self.filepath)
def get_size(self):
return os.path.getsize(self.filepath)
def files2docs_in_thread(
files: List[Union[KnowledgeFile, Tuple[str, str], Dict]],
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
pool: ThreadPoolExecutor = None,
) -> Generator:
'''
    Convert on-disk files into langchain Documents in batches using a thread pool.
    If an item is passed as a Tuple, its form is (filename, kb_name).
    The generator yields status, (kb_name, file_name, docs | error).
'''
def file2docs(*, file: KnowledgeFile, **kwargs) -> Tuple[bool, Tuple[str, str, List[Document]]]:
try:
return True, (file.kb_name, file.filename, file.file2text(**kwargs))
except Exception as e:
msg = f"从文件 {file.kb_name}/{file.filename} 加载文档时出错:{e}"
logger.error(f'{e.__class__.__name__}: {msg}',
exc_info=e if log_verbose else None)
return False, (file.kb_name, file.filename, msg)
kwargs_list = []
for i, file in enumerate(files):
kwargs = {}
try:
if isinstance(file, tuple) and len(file) >= 2:
filename=file[0]
kb_name=file[1]
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
elif isinstance(file, dict):
filename = file.pop("filename")
kb_name = file.pop("kb_name")
kwargs.update(file)
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
kwargs["file"] = file
kwargs["chunk_size"] = chunk_size
kwargs["chunk_overlap"] = chunk_overlap
kwargs["zh_title_enhance"] = zh_title_enhance
kwargs_list.append(kwargs)
except Exception as e:
yield False, (kb_name, filename, str(e))
for result in run_in_thread_pool(func=file2docs, params=kwargs_list, pool=pool):
yield result
if __name__ == "__main__":
from pprint import pprint
kb_file = KnowledgeFile(filename="test.txt", knowledge_base_name="samples")
# kb_file.text_splitter_name = "RecursiveCharacterTextSplitter"
docs = kb_file.file2docs()
pprint(docs[-1])
docs = kb_file.file2text()
pprint(docs[-1])
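    # --- Illustrative sketch (not part of the original file) ---
    # files2docs_in_thread yields (status, (kb_name, file_name, docs_or_error))
    # for each input; the tuple form (filename, kb_name) reuses the names above.
    for ok, (kb, fname, result) in files2docs_in_thread([("test.txt", "samples")]):
        if ok:
            print(f"{kb}/{fname}: {len(result)} docs")
        else:
            print(f"{kb}/{fname} failed: {result}")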
| [
"langchain.docstore.document.Document",
"langchain.text_splitter.TextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TextSplitter",
"langchain.text_splitter.TextSplitter.from_huggingface_tokenizer"
] | [((964, 1011), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'knowledge_base_name'], {}), '(KB_ROOT_PATH, knowledge_base_name)\n', (976, 1011), False, 'import os\n'), ((1789, 1807), 'server.utils.embedding_device', 'embedding_device', ([], {}), '()\n', (1805, 1807), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((1940, 1999), 'server.knowledge_base.kb_cache.base.embeddings_pool.load_embeddings', 'embeddings_pool.load_embeddings', ([], {'model': 'model', 'device': 'device'}), '(model=model, device=device)\n', (1971, 1999), False, 'from server.knowledge_base.kb_cache.base import embeddings_pool\n'), ((15816, 15881), 'server.utils.run_in_thread_pool', 'run_in_thread_pool', ([], {'func': 'file2docs', 'params': 'kwargs_list', 'pool': 'pool'}), '(func=file2docs, params=kwargs_list, pool=pool)\n', (15834, 15881), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((16147, 16163), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16153, 16163), False, 'from pprint import pprint\n'), ((16200, 16216), 'pprint.pprint', 'pprint', (['docs[-1]'], {}), '(docs[-1])\n', (16206, 16216), False, 'from pprint import pprint\n'), ((4174, 4193), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (4184, 4193), False, 'import json\n'), ((13844, 13873), 'os.path.isfile', 'os.path.isfile', (['self.filepath'], {}), '(self.filepath)\n', (13858, 13873), False, 'import os\n'), ((13915, 13946), 'os.path.getmtime', 'os.path.getmtime', (['self.filepath'], {}), '(self.filepath)\n', (13931, 13946), False, 'import os\n'), ((13987, 14017), 'os.path.getsize', 'os.path.getsize', (['self.filepath'], {}), '(self.filepath)\n', (14002, 14017), False, 'import os\n'), ((1445, 1469), 'os.listdir', 'os.listdir', (['KB_ROOT_PATH'], {}), '(KB_ROOT_PATH)\n', (1455, 1469), False, 'import os\n'), ((1641, 1661), 'os.listdir', 'os.listdir', (['doc_path'], {}), '(doc_path)\n', (1651, 1661), False, 'import os\n'), ((5418, 5461), 'importlib.import_module', 'importlib.import_module', (['"""document_loaders"""'], {}), "('document_loaders')\n", (5441, 5461), False, 'import importlib\n'), ((5514, 5567), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5537, 5567), False, 'import importlib\n'), ((5742, 5829), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (5754, 5829), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((5881, 5934), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (5904, 5934), False, 'import importlib\n'), ((10617, 10667), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (10640, 10667), False, 'import importlib\n'), ((10779, 10825), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': '(250)', 'chunk_overlap': '(50)'}), '(chunk_size=250, chunk_overlap=50)\n', (10791, 10825), False, 'from langchain.text_splitter import TextSplitter\n'), ((11618, 11686), 'configs.logger.info', 'logger.info', (['f"""{self.document_loader_name} used for 
{self.filepath}"""'], {}), "(f'{self.document_loader_name} used for {self.filepath}')\n", (11629, 11686), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((12910, 12937), 'text_splitter.zh_title_enhance', 'func_zh_title_enhance', (['docs'], {}), '(docs)\n', (12931, 12937), True, 'from text_splitter import zh_title_enhance as func_zh_title_enhance\n'), ((1499, 1528), 'os.path.join', 'os.path.join', (['KB_ROOT_PATH', 'f'], {}), '(KB_ROOT_PATH, f)\n', (1511, 1528), False, 'import os\n'), ((1692, 1720), 'os.path.join', 'os.path.join', (['doc_path', 'file'], {}), '(doc_path, file)\n', (1704, 1720), False, 'import os\n'), ((3852, 3867), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (3856, 3867), False, 'from pathlib import Path\n'), ((4809, 4855), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (4817, 4855), False, 'from langchain.docstore.document import Document\n'), ((7841, 7881), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (7864, 7881), False, 'import importlib\n'), ((14757, 14844), 'configs.logger.error', 'logger.error', (['f"""{e.__class__.__name__}: {msg}"""'], {'exc_info': '(e if log_verbose else None)'}), "(f'{e.__class__.__name__}: {msg}', exc_info=e if log_verbose else\n None)\n", (14769, 14844), False, 'from configs import EMBEDDING_MODEL, KB_ROOT_PATH, CHUNK_SIZE, OVERLAP_SIZE, ZH_TITLE_ENHANCE, logger, log_verbose, text_splitter_dict, LLM_MODEL, TEXT_SPLITTER_NAME\n'), ((8049, 8099), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (8072, 8099), False, 'import importlib\n'), ((8324, 8521), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], pipeline='zh_core_web_sm',\n chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n", (8358, 8521), False, 'from langchain.text_splitter import TextSplitter\n'), ((9895, 10012), 'langchain.text_splitter.TextSplitter.from_huggingface_tokenizer', 'TextSplitter.from_huggingface_tokenizer', ([], {'tokenizer': 'tokenizer', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(tokenizer=tokenizer, chunk_size=\n chunk_size, chunk_overlap=chunk_overlap)\n', (9934, 10012), False, 'from langchain.text_splitter import TextSplitter\n'), ((11140, 11166), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (11156, 11166), False, 'import os\n'), ((8691, 8861), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (8725, 8861), False, 'from langchain.text_splitter import TextSplitter\n'), ((9160, 9194), 'server.utils.get_model_worker_config', 'get_model_worker_config', (['llm_model'], {}), 
'(llm_model)\n', (9183, 9194), False, 'from server.utils import run_in_thread_pool, embedding_device, get_model_worker_config\n'), ((9592, 9633), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9625, 9633), False, 'from transformers import GPT2TokenizerFast\n'), ((9699, 9818), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["text_splitter_dict[splitter_name]['tokenizer_name_or_path']"], {'trust_remote_code': '(True)'}), "(text_splitter_dict[splitter_name][\n 'tokenizer_name_or_path'], trust_remote_code=True)\n", (9728, 9818), False, 'from transformers import AutoTokenizer\n'), ((10161, 10256), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(pipeline='zh_core_web_sm', chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (10173, 10256), False, 'from langchain.text_splitter import TextSplitter\n'), ((12717, 12748), 'os.path.basename', 'os.path.basename', (['self.filepath'], {}), '(self.filepath)\n', (12733, 12748), False, 'import os\n'), ((10407, 10471), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (10419, 10471), False, 'from langchain.text_splitter import TextSplitter\n')] |
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
# Client logic will also attempt to load URL/key from environment variables
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
new_repo_is_public: bool = False,
new_repo_description: str = "",
) -> str:
"""
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
    :param object: The LangChain object to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the repo to pull from in the format of
`owner/repo:commit_hash`.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
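if __name__ == "__main__":
    # --- Illustrative sketch (not part of the original module) ---
    # Hypothetical round trip against the LangChain Hub; "my-handle/joke-prompt"
    # is a made-up repo name, and valid hub credentials are assumed to be
    # available to the client.
    from langchain.prompts import ChatPromptTemplate
    prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
    print(push("my-handle/joke-prompt", prompt))
    pulled = pull("my-handle/joke-prompt")
    print(pulled)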
| [
"langchainhub.Client",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')] |
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
# Client logic will also attempt to load URL/key from environment variables
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
new_repo_is_public: bool = False,
new_repo_description: str = "",
) -> str:
"""
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the repo to pull from in the format of
`owner/repo:commit_hash`.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
| [
"langchainhub.Client",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')] |
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
# Client logic will also attempt to load URL/key from environment variables
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
new_repo_is_public: bool = False,
new_repo_description: str = "",
) -> str:
"""
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the repo to pull from in the format of
`owner/repo:commit_hash`.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
| [
"langchainhub.Client",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')] |
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
# Client logic will also attempt to load URL/key from environment variables
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
new_repo_is_public: bool = False,
new_repo_description: str = "",
) -> str:
"""
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
False (Private by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the repo to pull from in the format of
`owner/repo:commit_hash`.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
| [
"langchainhub.Client",
"langchain.load.load.loads",
"langchain.load.dump.dumps"
] | [((671, 703), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (677, 703), False, 'from langchainhub import Client\n'), ((1907, 1920), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1912, 1920), False, 'from langchain.load.dump import dumps\n'), ((2857, 2868), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (2862, 2868), False, 'from langchain.load.load import loads\n')] |
import langchain_visualizer # isort:skip # noqa: F401
import asyncio
import vcr_langchain as vcr
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
# ========================== Start of langchain example code ==========================
# https://langchain.readthedocs.io/en/latest/modules/chains/getting_started.html
llm = OpenAI(temperature=0)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
# ================================== Execute example ==================================
@vcr.use_cassette()
async def llm_chain_demo():
return chain.run("colorful socks")
def test_llm_usage_succeeds():
"""Check that the chain can run normally"""
result = asyncio.get_event_loop().run_until_complete(llm_chain_demo())
assert result.strip() == "Socktastic!"
if __name__ == "__main__":
from langchain_visualizer import visualize
visualize(llm_chain_demo)
| [
"langchain_visualizer.visualize",
"langchain.llms.OpenAI",
"langchain.chains.LLMChain",
"langchain.PromptTemplate"
] | [((387, 408), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (393, 408), False, 'from langchain.llms import OpenAI\n'), ((418, 534), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (432, 534), False, 'from langchain import PromptTemplate\n'), ((550, 582), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (558, 582), False, 'from langchain.chains import LLMChain\n'), ((676, 694), 'vcr_langchain.use_cassette', 'vcr.use_cassette', ([], {}), '()\n', (692, 694), True, 'import vcr_langchain as vcr\n'), ((1042, 1067), 'langchain_visualizer.visualize', 'visualize', (['llm_chain_demo'], {}), '(llm_chain_demo)\n', (1051, 1067), False, 'from langchain_visualizer import visualize\n'), ((856, 880), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (878, 880), False, 'import asyncio\n')] |
"""Test logic on base chain class."""
from typing import Any, Dict, List, Optional
import pytest
from langchain.callbacks.base import CallbackManager
from langchain.chains.base import Chain
from langchain.schema import BaseMemory
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
class FakeMemory(BaseMemory):
"""Fake memory class for testing purposes."""
@property
def memory_variables(self) -> List[str]:
"""Return baz variable."""
return ["baz"]
def load_memory_variables(
self, inputs: Optional[Dict[str, Any]] = None
) -> Dict[str, str]:
"""Return baz variable."""
return {"baz": "foo"}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Pass."""
pass
def clear(self) -> None:
"""Pass."""
pass
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
else:
return {"baz": "bar"}
def test_bad_inputs() -> None:
"""Test errors are raised if input keys are not found."""
chain = FakeChain()
with pytest.raises(ValueError):
chain({"foobar": "baz"})
def test_bad_outputs() -> None:
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(ValueError):
chain({"foo": "baz"})
def test_correct_call() -> None:
"""Test correct call of fake chain."""
chain = FakeChain()
output = chain({"foo": "bar"})
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_correct() -> None:
"""Test passing single input works."""
chain = FakeChain()
output = chain("bar")
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_error() -> None:
"""Test passing single input errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain("bar")
def test_run_single_arg() -> None:
"""Test run method with single arg."""
chain = FakeChain()
output = chain.run("bar")
assert output == "baz"
def test_run_multiple_args_error() -> None:
"""Test run method with multiple args errors as expected."""
chain = FakeChain()
with pytest.raises(ValueError):
chain.run("bar", "foo")
def test_run_kwargs() -> None:
"""Test run method with kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
output = chain.run(foo="bar", bar="foo")
assert output == "baz"
def test_run_kwargs_error() -> None:
"""Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run(foo="bar", baz="foo")
def test_run_args_and_kwargs_error() -> None:
"""Test run method with args and kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar", foo="bar")
def test_multiple_output_keys_error() -> None:
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar")
def test_run_arg_with_memory() -> None:
"""Test run method works when arg is passed."""
chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory())
chain.run("bar")
def test_run_with_callback() -> None:
"""Test run method works when callback manager is passed."""
handler = FakeCallbackHandler()
chain = FakeChain(
callback_manager=CallbackManager(handlers=[handler]), verbose=True
)
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 1
assert handler.ends == 1
assert handler.errors == 0
def test_run_with_callback_not_verbose() -> None:
"""Test run method works when callback manager is passed and not verbose."""
import langchain
langchain.verbose = False
handler = FakeCallbackHandler()
chain = FakeChain(callback_manager=CallbackManager(handlers=[handler]))
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 0
assert handler.ends == 0
assert handler.errors == 0
| [
"langchain.callbacks.base.CallbackManager"
] | [((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')] |
"""Test logic on base chain class."""
from typing import Any, Dict, List, Optional
import pytest
from langchain.callbacks.base import CallbackManager
from langchain.chains.base import Chain
from langchain.schema import BaseMemory
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
class FakeMemory(BaseMemory):
"""Fake memory class for testing purposes."""
@property
def memory_variables(self) -> List[str]:
"""Return baz variable."""
return ["baz"]
def load_memory_variables(
self, inputs: Optional[Dict[str, Any]] = None
) -> Dict[str, str]:
"""Return baz variable."""
return {"baz": "foo"}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Pass."""
pass
def clear(self) -> None:
"""Pass."""
pass
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
else:
return {"baz": "bar"}
def test_bad_inputs() -> None:
"""Test errors are raised if input keys are not found."""
chain = FakeChain()
with pytest.raises(ValueError):
chain({"foobar": "baz"})
def test_bad_outputs() -> None:
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(ValueError):
chain({"foo": "baz"})
def test_correct_call() -> None:
"""Test correct call of fake chain."""
chain = FakeChain()
output = chain({"foo": "bar"})
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_correct() -> None:
"""Test passing single input works."""
chain = FakeChain()
output = chain("bar")
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_error() -> None:
"""Test passing single input errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain("bar")
def test_run_single_arg() -> None:
"""Test run method with single arg."""
chain = FakeChain()
output = chain.run("bar")
assert output == "baz"
def test_run_multiple_args_error() -> None:
"""Test run method with multiple args errors as expected."""
chain = FakeChain()
with pytest.raises(ValueError):
chain.run("bar", "foo")
def test_run_kwargs() -> None:
"""Test run method with kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
output = chain.run(foo="bar", bar="foo")
assert output == "baz"
def test_run_kwargs_error() -> None:
"""Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run(foo="bar", baz="foo")
def test_run_args_and_kwargs_error() -> None:
"""Test run method with args and kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar", foo="bar")
def test_multiple_output_keys_error() -> None:
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar")
def test_run_arg_with_memory() -> None:
"""Test run method works when arg is passed."""
chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory())
chain.run("bar")
def test_run_with_callback() -> None:
"""Test run method works when callback manager is passed."""
handler = FakeCallbackHandler()
chain = FakeChain(
callback_manager=CallbackManager(handlers=[handler]), verbose=True
)
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 1
assert handler.ends == 1
assert handler.errors == 0
def test_run_with_callback_not_verbose() -> None:
"""Test run method works when callback manager is passed and not verbose."""
import langchain
langchain.verbose = False
handler = FakeCallbackHandler()
chain = FakeChain(callback_manager=CallbackManager(handlers=[handler]))
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 0
assert handler.ends == 0
assert handler.errors == 0
| [
"langchain.callbacks.base.CallbackManager"
] | [((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')] |
"""Test logic on base chain class."""
from typing import Any, Dict, List, Optional
import pytest
from langchain.callbacks.base import CallbackManager
from langchain.chains.base import Chain
from langchain.schema import BaseMemory
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
class FakeMemory(BaseMemory):
"""Fake memory class for testing purposes."""
@property
def memory_variables(self) -> List[str]:
"""Return baz variable."""
return ["baz"]
def load_memory_variables(
self, inputs: Optional[Dict[str, Any]] = None
) -> Dict[str, str]:
"""Return baz variable."""
return {"baz": "foo"}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Pass."""
pass
def clear(self) -> None:
"""Pass."""
pass
class FakeChain(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: List[str] = ["foo"]
the_output_keys: List[str] = ["bar"]
@property
def input_keys(self) -> List[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> List[str]:
"""Output key of bar."""
return self.the_output_keys
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
else:
return {"baz": "bar"}
def test_bad_inputs() -> None:
"""Test errors are raised if input keys are not found."""
chain = FakeChain()
with pytest.raises(ValueError):
chain({"foobar": "baz"})
def test_bad_outputs() -> None:
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(ValueError):
chain({"foo": "baz"})
def test_correct_call() -> None:
"""Test correct call of fake chain."""
chain = FakeChain()
output = chain({"foo": "bar"})
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_correct() -> None:
"""Test passing single input works."""
chain = FakeChain()
output = chain("bar")
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_error() -> None:
"""Test passing single input errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain("bar")
def test_run_single_arg() -> None:
"""Test run method with single arg."""
chain = FakeChain()
output = chain.run("bar")
assert output == "baz"
def test_run_multiple_args_error() -> None:
"""Test run method with multiple args errors as expected."""
chain = FakeChain()
with pytest.raises(ValueError):
chain.run("bar", "foo")
def test_run_kwargs() -> None:
"""Test run method with kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
output = chain.run(foo="bar", bar="foo")
assert output == "baz"
def test_run_kwargs_error() -> None:
"""Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run(foo="bar", baz="foo")
def test_run_args_and_kwargs_error() -> None:
"""Test run method with args and kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar", foo="bar")
def test_multiple_output_keys_error() -> None:
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=["foo", "bar"])
with pytest.raises(ValueError):
chain.run("bar")
def test_run_arg_with_memory() -> None:
"""Test run method works when arg is passed."""
chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory())
chain.run("bar")
def test_run_with_callback() -> None:
"""Test run method works when callback manager is passed."""
handler = FakeCallbackHandler()
chain = FakeChain(
callback_manager=CallbackManager(handlers=[handler]), verbose=True
)
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 1
assert handler.ends == 1
assert handler.errors == 0
def test_run_with_callback_not_verbose() -> None:
"""Test run method works when callback manager is passed and not verbose."""
import langchain
langchain.verbose = False
handler = FakeCallbackHandler()
chain = FakeChain(callback_manager=CallbackManager(handlers=[handler]))
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 0
assert handler.ends == 0
assert handler.errors == 0
| [
"langchain.callbacks.base.CallbackManager"
] | [((3986, 4007), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4005, 4007), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((4460, 4481), 'tests.unit_tests.callbacks.fake_callback_handler.FakeCallbackHandler', 'FakeCallbackHandler', ([], {}), '()\n', (4479, 4481), False, 'from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler\n'), ((1597, 1622), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1610, 1622), False, 'import pytest\n'), ((1804, 1829), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1817, 1829), False, 'import pytest\n'), ((2393, 2418), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2406, 2418), False, 'import pytest\n'), ((2746, 2771), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2759, 2771), False, 'import pytest\n'), ((3161, 3186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3174, 3186), False, 'import pytest\n'), ((3386, 3411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3399, 3411), False, 'import pytest\n'), ((3626, 3651), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3639, 3651), False, 'import pytest\n'), ((4056, 4091), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4071, 4091), False, 'from langchain.callbacks.base import CallbackManager\n'), ((4521, 4556), 'langchain.callbacks.base.CallbackManager', 'CallbackManager', ([], {'handlers': '[handler]'}), '(handlers=[handler])\n', (4536, 4556), False, 'from langchain.callbacks.base import CallbackManager\n')] |
"""Test Momento cache functionality.
To run tests, set the environment variable MOMENTO_AUTH_TOKEN to a valid
Momento auth token. This can be obtained by signing up for a free
Momento account at https://gomomento.com/.
"""
from __future__ import annotations
import uuid
from datetime import timedelta
from typing import Iterator
import pytest
import langchain
from langchain.cache import MomentoCache
from langchain.schema import Generation, LLMResult
from tests.unit_tests.llms.fake_llm import FakeLLM
def random_string() -> str:
return str(uuid.uuid4())
@pytest.fixture(scope="module")
def momento_cache() -> Iterator[MomentoCache]:
from momento import CacheClient, Configurations, CredentialProvider
cache_name = f"langchain-test-cache-{random_string()}"
client = CacheClient(
Configurations.Laptop.v1(),
CredentialProvider.from_environment_variable("MOMENTO_AUTH_TOKEN"),
default_ttl=timedelta(seconds=30),
)
try:
llm_cache = MomentoCache(client, cache_name)
langchain.llm_cache = llm_cache
yield llm_cache
finally:
client.delete_cache(cache_name)
def test_invalid_ttl() -> None:
from momento import CacheClient, Configurations, CredentialProvider
client = CacheClient(
Configurations.Laptop.v1(),
CredentialProvider.from_environment_variable("MOMENTO_AUTH_TOKEN"),
default_ttl=timedelta(seconds=30),
)
with pytest.raises(ValueError):
MomentoCache(client, cache_name=random_string(), ttl=timedelta(seconds=-1))
def test_momento_cache_miss(momento_cache: MomentoCache) -> None:
llm = FakeLLM()
stub_llm_output = LLMResult(generations=[[Generation(text="foo")]])
assert llm.generate([random_string()]) == stub_llm_output
@pytest.mark.parametrize(
"prompts, generations",
[
# Single prompt, single generation
([random_string()], [[random_string()]]),
# Single prompt, multiple generations
([random_string()], [[random_string(), random_string()]]),
# Single prompt, multiple generations
([random_string()], [[random_string(), random_string(), random_string()]]),
# Multiple prompts, multiple generations
(
[random_string(), random_string()],
[[random_string()], [random_string(), random_string()]],
),
],
)
def test_momento_cache_hit(
momento_cache: MomentoCache, prompts: list[str], generations: list[list[str]]
) -> None:
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [
[
Generation(text=generation, generation_info=params)
for generation in prompt_i_generations
]
for prompt_i_generations in generations
]
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
momento_cache.update(prompt_i, llm_string, llm_generations_i)
assert llm.generate(prompts) == LLMResult(
generations=llm_generations, llm_output={}
)
| [
"langchain.schema.Generation",
"langchain.schema.LLMResult",
"langchain.cache.MomentoCache"
] | [((569, 599), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (583, 599), False, 'import pytest\n'), ((1637, 1646), 'tests.unit_tests.llms.fake_llm.FakeLLM', 'FakeLLM', ([], {}), '()\n', (1644, 1646), False, 'from tests.unit_tests.llms.fake_llm import FakeLLM\n'), ((2507, 2516), 'tests.unit_tests.llms.fake_llm.FakeLLM', 'FakeLLM', ([], {}), '()\n', (2514, 2516), False, 'from tests.unit_tests.llms.fake_llm import FakeLLM\n'), ((552, 564), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (562, 564), False, 'import uuid\n'), ((813, 839), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (837, 839), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((849, 915), 'momento.CredentialProvider.from_environment_variable', 'CredentialProvider.from_environment_variable', (['"""MOMENTO_AUTH_TOKEN"""'], {}), "('MOMENTO_AUTH_TOKEN')\n", (893, 915), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((995, 1027), 'langchain.cache.MomentoCache', 'MomentoCache', (['client', 'cache_name'], {}), '(client, cache_name)\n', (1007, 1027), False, 'from langchain.cache import MomentoCache\n'), ((1286, 1312), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (1310, 1312), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1322, 1388), 'momento.CredentialProvider.from_environment_variable', 'CredentialProvider.from_environment_variable', (['"""MOMENTO_AUTH_TOKEN"""'], {}), "('MOMENTO_AUTH_TOKEN')\n", (1366, 1388), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1448, 1473), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1461, 1473), False, 'import pytest\n'), ((3024, 3077), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'llm_generations', 'llm_output': '{}'}), '(generations=llm_generations, llm_output={})\n', (3033, 3077), False, 'from langchain.schema import Generation, LLMResult\n'), ((937, 958), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (946, 958), False, 'from datetime import timedelta\n'), ((1410, 1431), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(30)'}), '(seconds=30)\n', (1419, 1431), False, 'from datetime import timedelta\n'), ((2680, 2731), 'langchain.schema.Generation', 'Generation', ([], {'text': 'generation', 'generation_info': 'params'}), '(text=generation, generation_info=params)\n', (2690, 2731), False, 'from langchain.schema import Generation, LLMResult\n'), ((1536, 1557), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(-1)'}), '(seconds=-1)\n', (1545, 1557), False, 'from datetime import timedelta\n'), ((1693, 1715), 'langchain.schema.Generation', 'Generation', ([], {'text': '"""foo"""'}), "(text='foo')\n", (1703, 1715), False, 'from langchain.schema import Generation, LLMResult\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"
llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llm = Replicate(
model=llama2_13b_chat,
model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens":500}
)
app = Flask(__name__)
@app.route('/msgrcvd_pager', methods=['POST', 'GET'])
def msgrcvd_pager():
message = request.args.get('message')
sender = request.args.get('sender')
recipient = request.args.get('recipient')
answer = llm(message)
print(message)
print(answer)
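    # Send the generated answer back to the sender through the Facebook Messenger Graph API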
url = f"https://graph.facebook.com/v18.0/{recipient}/messages"
params = {
'recipient': '{"id": ' + sender + '}',
'message': json.dumps({'text': answer}),
'messaging_type': 'RESPONSE',
'access_token': "<your page access token>"
}
headers = {
'Content-Type': 'application/json'
}
response = requests.post(url, params=params, headers=headers)
print(response.status_code)
print(response.text)
return message + "<p/>" + answer
| [
"langchain.llms.Replicate"
] | [((488, 595), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (497, 595), False, 'from langchain.llms import Replicate\n'), ((608, 623), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (613, 623), False, 'from flask import Flask\n'), ((718, 745), 'flask.request.args.get', 'request.args.get', (['"""message"""'], {}), "('message')\n", (734, 745), False, 'from flask import request\n'), ((759, 785), 'flask.request.args.get', 'request.args.get', (['"""sender"""'], {}), "('sender')\n", (775, 785), False, 'from flask import request\n'), ((802, 831), 'flask.request.args.get', 'request.args.get', (['"""recipient"""'], {}), "('recipient')\n", (818, 831), False, 'from flask import request\n'), ((1250, 1300), 'requests.post', 'requests.post', (['url'], {'params': 'params', 'headers': 'headers'}), '(url, params=params, headers=headers)\n', (1263, 1300), False, 'import requests\n'), ((1045, 1073), 'json.dumps', 'json.dumps', (["{'text': answer}"], {}), "({'text': answer})\n", (1055, 1073), False, 'import json\n')] |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-block::
Document
""" # noqa: E501
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
from langchain_community import document_transformers
# If not in interactive env, raise warning.
if not is_interactive_env():
warnings.warn(
"Importing document transformers from langchain is deprecated. Importing "
"from langchain will no longer be supported as of langchain==0.2.0. "
"Please import from langchain-community instead:\n\n"
f"`from langchain_community.document_transformers import {name}`.\n\n"
"To install langchain-community run `pip install -U langchain-community`.",
category=LangChainDeprecationWarning,
)
return getattr(document_transformers, name)
__all__ = [
"BeautifulSoupTransformer",
"DoctranQATransformer",
"DoctranTextTranslator",
"DoctranPropertyExtractor",
"EmbeddingsClusteringFilter",
"EmbeddingsRedundantFilter",
"GoogleTranslateTransformer",
"get_stateful_documents",
"LongContextReorder",
"NucliaTextTransformer",
"OpenAIMetadataTagger",
"Html2TextTransformer",
]
| [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.document_transformers import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing document transformers from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.document_transformers import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (720, 1102), False, 'import warnings\n')] |
# based on: https://python.langchain.com/en/latest/modules/indexes/vectorstores/examples/pgvector.html
from typing import List, Tuple
from langchain.embeddings.openai import OpenAIEmbeddings
import langchain.vectorstores.pgvector
class RepoSearcher:
store: langchain.vectorstores.pgvector.PGVector
def __init__(self, collection_name: str, connection_string: str):
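        # Vector store backed by Postgres/pgvector; embeds with OpenAI and compares by cosine distance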
self.store = langchain.vectorstores.pgvector.PGVector(
embedding_function=OpenAIEmbeddings(), # type: ignore
collection_name=collection_name,
connection_string=connection_string,
distance_strategy=langchain.vectorstores.pgvector.DistanceStrategy.COSINE,
)
def find_repos(self, query: str, limit=4) -> List[Tuple[str, str]]:
results = self.store.similarity_search_with_score(query, limit)
# sort by relevance, returning most relevant repository first
results.sort(key=lambda a: a[1], reverse=True)
return [
(r[0].metadata["namespace"], r[0].metadata["repository"]) for r in results
]
| [
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((469, 487), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (485, 487), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n')] |
import os
import chardet
import importlib
from pathlib import Path
from WebUI.text_splitter import zh_title_enhance as func_zh_title_enhance
from WebUI.Server.document_loaders import RapidOCRPDFLoader, RapidOCRLoader
import langchain.document_loaders
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from WebUI.configs.basicconfig import (GetKbConfig, GetKbRootPath, GetTextSplitterDict)
from WebUI.Server.utils import run_in_thread_pool, get_model_worker_config
from WebUI.Server.document_loaders import *
from typing import List, Union,Dict, Tuple, Generator
TEXT_SPLITTER_NAME = "ChineseRecursiveTextSplitter"
CHUNK_SIZE = 500
OVERLAP_SIZE = 100
ZH_TITLE_ENHANCE = False
VECTOR_SEARCH_TOP_K = 5
SCORE_THRESHOLD = 1.5
LOADER_DICT = {"UnstructuredHTMLLoader": ['.html'],
"MHTMLLoader": ['.mhtml'],
"UnstructuredMarkdownLoader": ['.md'],
"JSONLoader": [".json"],
"JSONLinesLoader": [".jsonl"],
"CSVLoader": [".csv"],
# "FilteredCSVLoader": [".csv"], # 需要自己指定,目前还没有支持
"RapidOCRPDFLoader": [".pdf"],
"RapidOCRLoader": ['.png', '.jpg', '.jpeg', '.bmp'],
"UnstructuredEmailLoader": ['.eml', '.msg'],
"UnstructuredEPubLoader": ['.epub'],
"UnstructuredExcelLoader": ['.xlsx', '.xls', '.xlsd'],
"NotebookLoader": ['.ipynb'],
"UnstructuredODTLoader": ['.odt'],
"PythonLoader": ['.py'],
"UnstructuredRSTLoader": ['.rst'],
"UnstructuredRTFLoader": ['.rtf'],
"SRTLoader": ['.srt'],
"TomlLoader": ['.toml'],
"UnstructuredTSVLoader": ['.tsv'],
"UnstructuredWordDocumentLoader": ['.docx', '.doc'],
"UnstructuredXMLLoader": ['.xml'],
"UnstructuredPowerPointLoader": ['.ppt', '.pptx'],
"EverNoteLoader": ['.enex'],
"UnstructuredFileLoader": ['.txt'],
}
SUPPORTED_EXTS = [ext for sublist in LOADER_DICT.values() for ext in sublist]
def validate_kb_name(knowledge_base_id: str) -> bool:
if "../" in knowledge_base_id:
return False
return True
def get_kb_path(knowledge_base_name: str):
kb_config = GetKbConfig()
kb_root_path = GetKbRootPath(kb_config)
return os.path.join(kb_root_path, knowledge_base_name)
def get_doc_path(knowledge_base_name: str):
return os.path.join(get_kb_path(knowledge_base_name), "content")
def get_vs_path(knowledge_base_name: str, vector_name: str):
return os.path.join(get_kb_path(knowledge_base_name), "vector_store", vector_name)
def get_file_path(knowledge_base_name: str, doc_name: str):
return os.path.join(get_doc_path(knowledge_base_name), doc_name)
def list_files_from_folder(kb_name: str):
doc_path = get_doc_path(kb_name)
result = []
def is_skiped_path(path: str):
tail = os.path.basename(path).lower()
for x in ["temp", "tmp", ".", "~$"]:
if tail.startswith(x):
return True
return False
def process_entry(entry):
if is_skiped_path(entry.path):
return
if entry.is_symlink():
target_path = os.path.realpath(entry.path)
with os.scandir(target_path) as target_it:
for target_entry in target_it:
process_entry(target_entry)
elif entry.is_file():
file_path = (Path(os.path.relpath(entry.path, doc_path)).as_posix())
result.append(file_path)
elif entry.is_dir():
with os.scandir(entry.path) as it:
for sub_entry in it:
process_entry(sub_entry)
with os.scandir(doc_path) as it:
for entry in it:
process_entry(entry)
return result
def get_LoaderClass(file_extension):
for LoaderClass, extensions in LOADER_DICT.items():
if file_extension in extensions:
return LoaderClass
def get_loader(loader_name: str, file_path: str, loader_kwargs: Dict = None):
loader_kwargs = loader_kwargs or {}
try:
if loader_name == "RapidOCRPDFLoader":
DocumentLoader = RapidOCRPDFLoader
elif loader_name == "RapidOCRLoader":
DocumentLoader = RapidOCRLoader
else:
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, loader_name)
except Exception as e:
msg = f"for file {file_path} search loader {loader_name} failed: {e}"
print(f'{e.__class__.__name__}: {msg}')
document_loaders_module = importlib.import_module('langchain.document_loaders')
DocumentLoader = getattr(document_loaders_module, "UnstructuredFileLoader")
if loader_name == "UnstructuredFileLoader":
loader_kwargs.setdefault("autodetect_encoding", True)
elif loader_name == "CSVLoader":
if not loader_kwargs.get("encoding"):
with open(file_path, 'rb') as struct_file:
encode_detect = chardet.detect(struct_file.read())
if encode_detect is None:
encode_detect = {"encoding": "utf-8"}
loader_kwargs["encoding"] = encode_detect["encoding"]
elif loader_name == "JSONLoader":
loader_kwargs.setdefault("jq_schema", ".")
loader_kwargs.setdefault("text_content", False)
elif loader_name == "JSONLinesLoader":
loader_kwargs.setdefault("jq_schema", ".")
loader_kwargs.setdefault("text_content", False)
loader = DocumentLoader(file_path, **loader_kwargs)
return loader
def make_text_splitter(
splitter_name: str = TEXT_SPLITTER_NAME,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
llm_model: str = "",
):
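    # Build the configured text splitter; falls back to RecursiveCharacterTextSplitter if resolution fails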
splitter_name = splitter_name or "SpacyTextSplitter"
try:
text_splitter_dict = GetTextSplitterDict()
if splitter_name == "MarkdownHeaderTextSplitter":
headers_to_split_on = text_splitter_dict[splitter_name]['headers_to_split_on']
text_splitter = langchain.text_splitter.MarkdownHeaderTextSplitter(
headers_to_split_on=headers_to_split_on)
elif splitter_name == "ChineseRecursiveTextSplitter":
text_splitter_module = importlib.import_module('text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
text_splitter = TextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
else:
try:
text_splitter_module = importlib.import_module('text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
except:
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, splitter_name)
if text_splitter_dict[splitter_name]["source"] == "tiktoken":
try:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter.from_tiktoken_encoder(
encoding_name=text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
elif text_splitter_dict[splitter_name]["source"] == "huggingface":
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "":
config = get_model_worker_config(llm_model)
text_splitter_dict[splitter_name]["tokenizer_name_or_path"] = \
config.get("model_path")
if text_splitter_dict[splitter_name]["tokenizer_name_or_path"] == "gpt2":
from transformers import GPT2TokenizerFast
from langchain.text_splitter import CharacterTextSplitter
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
else:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
text_splitter_dict[splitter_name]["tokenizer_name_or_path"],
trust_remote_code=True)
text_splitter = TextSplitter.from_huggingface_tokenizer(
tokenizer=tokenizer,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
else:
try:
text_splitter = TextSplitter(
pipeline="zh_core_web_sm",
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except:
text_splitter = TextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap
)
except Exception as e:
print(e)
text_splitter_module = importlib.import_module('langchain.text_splitter')
TextSplitter = getattr(text_splitter_module, "RecursiveCharacterTextSplitter")
text_splitter = TextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
splitter_name = "RecursiveCharacterTextSplitter"
return text_splitter, splitter_name
def list_kbs_from_folder():
kb_config = GetKbConfig()
kb_root_path = GetKbRootPath(kb_config)
kb_list = []
try:
dirs = os.listdir(kb_root_path)
for f in dirs:
if os.path.isdir(os.path.join(kb_root_path, f)):
kb_list.append(f)
except Exception as e:
pass
return kb_list
class KnowledgeFile:
def __init__(
self,
filename: str,
knowledge_base_name: str,
loader_kwargs: Dict = {},
):
self.kb_name = knowledge_base_name
self.filename = str(Path(filename).as_posix())
self.ext = os.path.splitext(filename)[-1].lower()
if self.ext not in SUPPORTED_EXTS:
raise ValueError(f"Not support file format: {self.filename}")
self.loader_kwargs = loader_kwargs
self.filepath = get_file_path(knowledge_base_name, filename)
self.docs = None
self.splited_docs = None
self.document_loader_name = get_LoaderClass(self.ext)
self.text_splitter_name = TEXT_SPLITTER_NAME
def file2docs(self, refresh: bool = False):
if self.docs is None or refresh:
print(f"{self.document_loader_name} used for {self.filepath}")
loader = get_loader(loader_name=self.document_loader_name,
file_path=self.filepath,
loader_kwargs=self.loader_kwargs)
self.docs = loader.load()
return self.docs
def docs2texts(
self,
docs: List[Document] = None,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
docs = docs or self.file2docs(refresh=refresh)
if not docs:
return []
if self.ext not in [".csv"]:
if text_splitter is None:
text_splitter, new_text_splitter_name = make_text_splitter(splitter_name=self.text_splitter_name, chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
if new_text_splitter_name != self.text_splitter_name:
self.text_splitter_name = new_text_splitter_name
if self.text_splitter_name == "MarkdownHeaderTextSplitter":
docs = text_splitter.split_text(docs[0].page_content)
else:
docs = text_splitter.split_documents(docs)
if not docs:
return []
print(f"Document split samples: {docs[0]}")
if zh_title_enhance:
docs = func_zh_title_enhance(docs)
self.splited_docs = docs
return self.splited_docs
def file2text(
self,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
refresh: bool = False,
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
text_splitter: TextSplitter = None,
):
if self.splited_docs is None or refresh:
docs = self.file2docs()
self.splited_docs = self.docs2texts(docs=docs,
zh_title_enhance=zh_title_enhance,
refresh=refresh,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
text_splitter=text_splitter)
return self.splited_docs
def file_exist(self):
return os.path.isfile(self.filepath)
def get_mtime(self):
return os.path.getmtime(self.filepath)
def get_size(self):
return os.path.getsize(self.filepath)
def files2docs_in_thread(
files: List[Union[KnowledgeFile, Tuple[str, str], Dict]],
chunk_size: int = CHUNK_SIZE,
chunk_overlap: int = OVERLAP_SIZE,
zh_title_enhance: bool = ZH_TITLE_ENHANCE,
) -> Generator:
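    # Load and split files concurrently in a thread pool, yielding (ok, (kb_name, filename, docs_or_error)) per file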
def file2docs(*, file: KnowledgeFile, **kwargs) -> Tuple[bool, Tuple[str, str, List[Document]]]:
try:
return True, (file.kb_name, file.filename, file.file2text(**kwargs))
except Exception as e:
msg = f"from {file.kb_name}/{file.filename} load failed: {e}"
return False, (file.kb_name, file.filename, msg)
kwargs_list = []
for i, file in enumerate(files):
kwargs = {}
try:
if isinstance(file, tuple) and len(file) >= 2:
filename = file[0]
kb_name = file[1]
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
elif isinstance(file, dict):
filename = file.pop("filename")
kb_name = file.pop("kb_name")
kwargs.update(file)
file = KnowledgeFile(filename=filename, knowledge_base_name=kb_name)
kwargs["file"] = file
kwargs["chunk_size"] = chunk_size
kwargs["chunk_overlap"] = chunk_overlap
kwargs["zh_title_enhance"] = zh_title_enhance
kwargs_list.append(kwargs)
except Exception as e:
yield False, (kb_name, filename, str(e))
for result in run_in_thread_pool(func=file2docs, params=kwargs_list):
yield result | [
"langchain.text_splitter.TextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TextSplitter",
"langchain.text_splitter.TextSplitter.from_huggingface_tokenizer"
] | [((2330, 2343), 'WebUI.configs.basicconfig.GetKbConfig', 'GetKbConfig', ([], {}), '()\n', (2341, 2343), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((2363, 2387), 'WebUI.configs.basicconfig.GetKbRootPath', 'GetKbRootPath', (['kb_config'], {}), '(kb_config)\n', (2376, 2387), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((2399, 2446), 'os.path.join', 'os.path.join', (['kb_root_path', 'knowledge_base_name'], {}), '(kb_root_path, knowledge_base_name)\n', (2411, 2446), False, 'import os\n'), ((9822, 9835), 'WebUI.configs.basicconfig.GetKbConfig', 'GetKbConfig', ([], {}), '()\n', (9833, 9835), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((9855, 9879), 'WebUI.configs.basicconfig.GetKbRootPath', 'GetKbRootPath', (['kb_config'], {}), '(kb_config)\n', (9868, 9879), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((15110, 15164), 'WebUI.Server.utils.run_in_thread_pool', 'run_in_thread_pool', ([], {'func': 'file2docs', 'params': 'kwargs_list'}), '(func=file2docs, params=kwargs_list)\n', (15128, 15164), False, 'from WebUI.Server.utils import run_in_thread_pool, get_model_worker_config\n'), ((3792, 3812), 'os.scandir', 'os.scandir', (['doc_path'], {}), '(doc_path)\n', (3802, 3812), False, 'import os\n'), ((6015, 6036), 'WebUI.configs.basicconfig.GetTextSplitterDict', 'GetTextSplitterDict', ([], {}), '()\n', (6034, 6036), False, 'from WebUI.configs.basicconfig import GetKbConfig, GetKbRootPath, GetTextSplitterDict\n'), ((9921, 9945), 'os.listdir', 'os.listdir', (['kb_root_path'], {}), '(kb_root_path)\n', (9931, 9945), False, 'import os\n'), ((13436, 13465), 'os.path.isfile', 'os.path.isfile', (['self.filepath'], {}), '(self.filepath)\n', (13450, 13465), False, 'import os\n'), ((13507, 13538), 'os.path.getmtime', 'os.path.getmtime', (['self.filepath'], {}), '(self.filepath)\n', (13523, 13538), False, 'import os\n'), ((13579, 13609), 'os.path.getsize', 'os.path.getsize', (['self.filepath'], {}), '(self.filepath)\n', (13594, 13609), False, 'import os\n'), ((3297, 3325), 'os.path.realpath', 'os.path.realpath', (['entry.path'], {}), '(entry.path)\n', (3313, 3325), False, 'import os\n'), ((4750, 4803), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), "('langchain.document_loaders')\n", (4773, 4803), False, 'import importlib\n'), ((9453, 9503), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (9476, 9503), False, 'import importlib\n'), ((9615, 9679), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (9627, 9679), False, 'from langchain.text_splitter import TextSplitter\n'), ((12478, 12505), 'WebUI.text_splitter.zh_title_enhance', 'func_zh_title_enhance', (['docs'], {}), '(docs)\n', (12499, 12505), True, 'from WebUI.text_splitter import zh_title_enhance as func_zh_title_enhance\n'), ((2990, 3012), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3006, 3012), False, 'import os\n'), ((3343, 3366), 'os.scandir', 'os.scandir', (['target_path'], {}), '(target_path)\n', (3353, 3366), False, 'import os\n'), ((4434, 4487), 'importlib.import_module', 'importlib.import_module', (['"""langchain.document_loaders"""'], {}), 
"('langchain.document_loaders')\n", (4457, 4487), False, 'import importlib\n'), ((6420, 6460), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (6443, 6460), False, 'import importlib\n'), ((6561, 6625), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (6573, 6625), False, 'from langchain.text_splitter import TextSplitter\n'), ((9998, 10027), 'os.path.join', 'os.path.join', (['kb_root_path', 'f'], {}), '(kb_root_path, f)\n', (10010, 10027), False, 'import os\n'), ((6696, 6736), 'importlib.import_module', 'importlib.import_module', (['"""text_splitter"""'], {}), "('text_splitter')\n", (6719, 6736), False, 'import importlib\n'), ((10362, 10376), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (10366, 10376), False, 'from pathlib import Path\n'), ((10408, 10434), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (10424, 10434), False, 'import os\n'), ((3670, 3692), 'os.scandir', 'os.scandir', (['entry.path'], {}), '(entry.path)\n', (3680, 3692), False, 'import os\n'), ((6872, 6922), 'importlib.import_module', 'importlib.import_module', (['"""langchain.text_splitter"""'], {}), "('langchain.text_splitter')\n", (6895, 6922), False, 'import importlib\n'), ((7131, 7328), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], pipeline='zh_core_web_sm',\n chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n", (7165, 7328), False, 'from langchain.text_splitter import TextSplitter\n'), ((8731, 8848), 'langchain.text_splitter.TextSplitter.from_huggingface_tokenizer', 'TextSplitter.from_huggingface_tokenizer', ([], {'tokenizer': 'tokenizer', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(tokenizer=tokenizer, chunk_size=\n chunk_size, chunk_overlap=chunk_overlap)\n', (8770, 8848), False, 'from langchain.text_splitter import TextSplitter\n'), ((3536, 3573), 'os.path.relpath', 'os.path.relpath', (['entry.path', 'doc_path'], {}), '(entry.path, doc_path)\n', (3551, 3573), False, 'import os\n'), ((7498, 7668), 'langchain.text_splitter.TextSplitter.from_tiktoken_encoder', 'TextSplitter.from_tiktoken_encoder', ([], {'encoding_name': "text_splitter_dict[splitter_name]['tokenizer_name_or_path']", 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(encoding_name=text_splitter_dict[\n splitter_name]['tokenizer_name_or_path'], chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (7532, 7668), False, 'from langchain.text_splitter import TextSplitter\n'), ((7948, 7982), 'WebUI.Server.utils.get_model_worker_config', 'get_model_worker_config', (['llm_model'], {}), '(llm_model)\n', (7971, 7982), False, 'from WebUI.Server.utils import run_in_thread_pool, get_model_worker_config\n'), ((8380, 8421), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (8413, 8421), False, 'from transformers import GPT2TokenizerFast\n'), ((8535, 8654), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (["text_splitter_dict[splitter_name]['tokenizer_name_or_path']"], 
{'trust_remote_code': '(True)'}), "(text_splitter_dict[splitter_name][\n 'tokenizer_name_or_path'], trust_remote_code=True)\n", (8564, 8654), False, 'from transformers import AutoTokenizer\n'), ((8997, 9092), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), "(pipeline='zh_core_web_sm', chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n", (9009, 9092), False, 'from langchain.text_splitter import TextSplitter\n'), ((9243, 9307), 'langchain.text_splitter.TextSplitter', 'TextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (9255, 9307), False, 'from langchain.text_splitter import TextSplitter\n')] |
import os
import langchain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain import OpenAI, VectorDBQA
from langchain.document_loaders import TextLoader
from langchain.document_loaders import WebBaseLoader
from langchain.agents.agent_toolkits import (
create_vectorstore_agent,
VectorStoreToolkit,
VectorStoreInfo,
)
os.environ['SERPAPI_API_KEY'] = "YOUR_serpapi_api_key"
os.environ['OPENAI_API_KEY'] = "YOUR_OPENAI_API_KEY"
llm = OpenAI(temperature=0)
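# Load the local text file, split it into 1,000-character chunks, and embed the chunks into a Chroma collection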
loader = TextLoader('the_needed_text.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
text_store = Chroma.from_documents(texts, embeddings, collection_name="the_needed_text")
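# Load the Ruff FAQ page and index it in a separate Chroma collection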
loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/")
docs = loader.load()
ruff_texts = text_splitter.split_documents(docs)
ruff_store = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff")
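# Describe the text store so the agent knows what it contains and when to query it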
vectorstore_info = VectorStoreInfo(
name="the_needed_text_in_detail",
description="the most recent data of bill gates",
vectorstore=text_store
)
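# Wrap the store in a toolkit and build an agent that can answer questions against it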
toolkit = VectorStoreToolkit(vectorstore_info=vectorstore_info)
agent_executor = create_vectorstore_agent(
llm=llm,
toolkit=toolkit,
verbose=True
)
agent_executor.run("who is bill gates?, what is his age now? and how many assets he has now? ") | [
"langchain.document_loaders.WebBaseLoader",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.vectorstores.Chroma.from_documents",
"langchain.agents.agent_toolkits.create_vectorstore_agent",
"langchain.agents.agent_toolkits.VectorStoreToolkit",
"langchain.document_loaders.TextLoader",
"langchain.agents.agent_toolkits.VectorStoreInfo",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.OpenAI"
] | [((586, 607), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (592, 607), False, 'from langchain import OpenAI, VectorDBQA\n'), ((622, 655), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""the_needed_text.txt"""'], {}), "('the_needed_text.txt')\n", (632, 655), False, 'from langchain.document_loaders import TextLoader\n'), ((700, 755), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (721, 755), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((822, 840), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (838, 840), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((855, 930), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {'collection_name': '"""the_needed_text"""'}), "(texts, embeddings, collection_name='the_needed_text')\n", (876, 930), False, 'from langchain.vectorstores import Chroma\n'), ((943, 990), 'langchain.document_loaders.WebBaseLoader', 'WebBaseLoader', (['"""https://beta.ruff.rs/docs/faq/"""'], {}), "('https://beta.ruff.rs/docs/faq/')\n", (956, 990), False, 'from langchain.document_loaders import WebBaseLoader\n'), ((1077, 1146), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['ruff_texts', 'embeddings'], {'collection_name': '"""ruff"""'}), "(ruff_texts, embeddings, collection_name='ruff')\n", (1098, 1146), False, 'from langchain.vectorstores import Chroma\n'), ((1169, 1297), 'langchain.agents.agent_toolkits.VectorStoreInfo', 'VectorStoreInfo', ([], {'name': '"""the_needed_text_in_detail"""', 'description': '"""the most recent data of bill gates"""', 'vectorstore': 'text_store'}), "(name='the_needed_text_in_detail', description=\n 'the most recent data of bill gates', vectorstore=text_store)\n", (1184, 1297), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n'), ((1324, 1377), 'langchain.agents.agent_toolkits.VectorStoreToolkit', 'VectorStoreToolkit', ([], {'vectorstore_info': 'vectorstore_info'}), '(vectorstore_info=vectorstore_info)\n', (1342, 1377), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n'), ((1400, 1464), 'langchain.agents.agent_toolkits.create_vectorstore_agent', 'create_vectorstore_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit', 'verbose': '(True)'}), '(llm=llm, toolkit=toolkit, verbose=True)\n', (1424, 1464), False, 'from langchain.agents.agent_toolkits import create_vectorstore_agent, VectorStoreToolkit, VectorStoreInfo\n')] |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session
from langchain.utils import get_from_env
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.embeddings.base import Embeddings
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import Generation
from langchain.vectorstores.redis import Redis as RedisVectorstore
logger = logging.getLogger(__file__)
if TYPE_CHECKING:
import momento
RETURN_VAL_TYPE = Sequence[Generation]
def _hash(_input: str) -> str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest()
def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str:
"""Dump generations to json.
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: Json representing a list of generations.
"""
return json.dumps([generation.dict() for generation in generations])
def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE:
"""Load generations from json.
Args:
generations_json (str): A string of json representing a list of generations.
Raises:
ValueError: Could not decode json string to list of generations.
Returns:
RETURN_VAL_TYPE: A list of generations.
"""
try:
results = json.loads(generations_json)
return [Generation(**generation_dict) for generation_dict in results]
except json.JSONDecodeError:
raise ValueError(
f"Could not decode json to list of generations: {generations_json}"
)
class BaseCache(ABC):
"""Base interface for cache."""
@abstractmethod
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
@abstractmethod
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
@abstractmethod
def clear(self, **kwargs: Any) -> None:
"""Clear cache that can take additional keyword arguments."""
class InMemoryCache(BaseCache):
"""Cache that stores things in memory."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
Base = declarative_base()
class FullLLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_llm_cache"
prompt = Column(String, primary_key=True)
llm = Column(String, primary_key=True)
idx = Column(Integer, primary_key=True)
response = Column(String)
class SQLAlchemyCache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt == prompt) # type: ignore
.where(self.cache_schema.llm == llm_string)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
rows = session.execute(stmt).fetchall()
if rows:
try:
return [loads(row[0]) for row in rows]
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
return [Generation(text=row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
items = [
self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.query(self.cache_schema).delete()
class SQLiteCache(SQLAlchemyCache):
"""Cache that uses SQLite as a backend."""
def __init__(self, database_path: str = ".langchain.db"):
"""Initialize by creating the engine and all tables."""
engine = create_engine(f"sqlite:///{database_path}")
super().__init__(engine)
class RedisCache(BaseCache):
"""Cache that uses Redis as a backend."""
# TODO - implement a TTL policy in Redis
def __init__(self, redis_: Any):
"""Initialize by passing in Redis instance."""
try:
from redis import Redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Redis object.")
self.redis = redis_
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a Redis HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
# Write to a Redis HASH
key = self._key(prompt, llm_string)
self.redis.hset(
key,
mapping={
str(idx): generation.text for idx, generation in enumerate(return_val)
},
)
def clear(self, **kwargs: Any) -> None:
"""Clear cache. If `asynchronous` is True, flush asynchronously."""
asynchronous = kwargs.get("asynchronous", False)
self.redis.flushdb(asynchronous=asynchronous, **kwargs)
class RedisSemanticCache(BaseCache):
"""Cache that uses Redis as a vector-store backend."""
# TODO - implement a TTL policy in Redis
def __init__(
self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
):
"""Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
            score_threshold (float, optional): Similarity score threshold used when searching the cache. Defaults to 0.2.
Example:
.. code-block:: python
import langchain
from langchain.cache import RedisSemanticCache
from langchain.embeddings import OpenAIEmbeddings
langchain.llm_cache = RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
)
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold
def _index_name(self, llm_string: str) -> str:
hashed_index = _hash(llm_string)
return f"cache:{hashed_index}"
def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
index_name = self._index_name(llm_string)
# return vectorstore client for the specific llm string
if index_name in self._cache_dict:
return self._cache_dict[index_name]
# create new vectorstore client for the specific llm string
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
)
except ValueError:
redis = RedisVectorstore(
embedding_function=self.embedding.embed_query,
index_name=index_name,
redis_url=self.redis_url,
)
_embedding = self.embedding.embed_query(text="test")
redis._create_index(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
def clear(self, **kwargs: Any) -> None:
"""Clear semantic cache for a given llm_string."""
index_name = self._index_name(kwargs["llm_string"])
if index_name in self._cache_dict:
self._cache_dict[index_name].drop_index(
index_name=index_name, delete_documents=True, redis_url=self.redis_url
)
del self._cache_dict[index_name]
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
llm_cache = self._get_llm_cache(llm_string)
generations = []
# Read from a Hash
results = llm_cache.similarity_search_limit_score(
query=prompt,
k=1,
score_threshold=self.score_threshold,
)
if results:
for document in results:
for text in document.metadata["return_val"]:
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisSemanticCache only supports caching of "
f"normal LLM generations, got {type(gen)}"
)
llm_cache = self._get_llm_cache(llm_string)
# Write to vectorstore
metadata = {
"llm_string": llm_string,
"prompt": prompt,
"return_val": [generation.text for generation in return_val],
}
llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
class GPTCache(BaseCache):
"""Cache that uses GPTCache as a backend."""
def __init__(
self,
init_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = None,
):
"""Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
                from gptcache.manager.factory import manager_factory
                # Avoid multiple caches using the same file,
                # causing different llm model caches to affect each other
                def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
langchain.llm_cache = GPTCache(init_gptcache)
"""
try:
import gptcache # noqa: F401
except ImportError:
raise ImportError(
"Could not import gptcache python package. "
"Please install it with `pip install gptcache`."
)
self.init_gptcache_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = init_func
self.gptcache_dict: Dict[str, Any] = {}
def _new_gptcache(self, llm_string: str) -> Any:
"""New gptcache object"""
from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt
_gptcache = Cache()
if self.init_gptcache_func is not None:
sig = inspect.signature(self.init_gptcache_func)
if len(sig.parameters) == 2:
self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg]
else:
self.init_gptcache_func(_gptcache) # type: ignore[call-arg]
else:
_gptcache.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=llm_string),
)
self.gptcache_dict[llm_string] = _gptcache
return _gptcache
def _get_gptcache(self, llm_string: str) -> Any:
"""Get a cache object.
When the corresponding llm model cache does not exist, it will be created."""
        # A call placed in dict.get's default argument runs on every lookup, so
        # only build a new gptcache object when one is actually missing.
        _gptcache = self.gptcache_dict.get(llm_string)
        if _gptcache is None:
            _gptcache = self._new_gptcache(llm_string)
        return _gptcache
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
"""
from gptcache.adapter.api import get
_gptcache = self.gptcache_dict.get(llm_string, None)
if _gptcache is None:
return None
res = get(prompt, cache_obj=_gptcache)
if res:
return [
Generation(**generation_dict) for generation_dict in json.loads(res)
]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then store the `prompt` and `return_val` in the cache object.
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"GPTCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
from gptcache.adapter.api import put
_gptcache = self._get_gptcache(llm_string)
handled_data = json.dumps([generation.dict() for generation in return_val])
put(prompt, handled_data, cache_obj=_gptcache)
return None
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
from gptcache import Cache
for gptcache_instance in self.gptcache_dict.values():
gptcache_instance = cast(Cache, gptcache_instance)
gptcache_instance.flush()
self.gptcache_dict.clear()
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
def _validate_ttl(ttl: Optional[timedelta]) -> None:
if ttl is not None and ttl <= timedelta(seconds=0):
raise ValueError(f"ttl must be positive but was {ttl}.")
class MomentoCache(BaseCache):
"""Cache that uses Momento as a backend. See https://gomomento.com/"""
def __init__(
self,
cache_client: momento.CacheClient,
cache_name: str,
*,
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
Defaults to None, ie use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
ValueError: ttl is non-null and non-negative
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
@classmethod
def from_client_params(
cls,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoCache:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
credentials = CredentialProvider.from_string(auth_token)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(cache_client, cache_name, ttl=ttl, **kwargs)
def __key(self, prompt: str, llm_string: str) -> str:
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
"""
from momento.responses import CacheGet
generations: RETURN_VAL_TYPE = []
get_response = self.cache_client.get(
self.cache_name, self.__key(prompt, llm_string)
)
if isinstance(get_response, CacheGet.Hit):
value = get_response.value_string
generations = _load_generations_from_json(value)
elif isinstance(get_response, CacheGet.Miss):
pass
elif isinstance(get_response, CacheGet.Error):
raise get_response.inner_exception
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"Momento only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f"Unexpected response: {set_response}")
def clear(self, **kwargs: Any) -> None:
"""Clear the cache.
Raises:
SdkException: Momento service or network error
"""
from momento.responses import CacheFlush
flush_response = self.cache_client.flush_cache(self.cache_name)
if isinstance(flush_response, CacheFlush.Success):
pass
elif isinstance(flush_response, CacheFlush.Error):
raise flush_response.inner_exception
| [
"langchain.schema.Generation",
"langchain.utils.get_from_env",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((3565, 3597), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3571, 3597), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3608, 3640), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3614, 3640), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3651, 3684), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3657, 3684), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3700, 3714), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3706, 3714), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((1888, 1916), 'json.loads', 'json.loads', (['generations_json'], {}), '(generations_json)\n', (1898, 1916), False, 'import json\n'), ((6089, 6132), 'sqlalchemy.create_engine', 'create_engine', (['f"""sqlite:///{database_path}"""'], {}), "(f'sqlite:///{database_path}')\n", (6102, 6132), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((14135, 14142), 'gptcache.Cache', 'Cache', ([], {}), '()\n', (14140, 14142), False, 'from gptcache import Cache\n'), ((15447, 15479), 'gptcache.adapter.api.get', 'get', (['prompt'], {'cache_obj': '_gptcache'}), '(prompt, cache_obj=_gptcache)\n', (15450, 15479), False, 'from gptcache.adapter.api import get\n'), ((16366, 16412), 'gptcache.adapter.api.put', 'put', (['prompt', 'handled_data'], {'cache_obj': '_gptcache'}), '(prompt, handled_data, cache_obj=_gptcache)\n', (16369, 16412), False, 'from gptcache.adapter.api import put\n'), ((20261, 20303), 'momento.CredentialProvider.from_string', 'CredentialProvider.from_string', (['auth_token'], {}), '(auth_token)\n', (20291, 20303), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20327, 20383), 'momento.CacheClient', 'CacheClient', (['configuration', 'credentials'], {'default_ttl': 'ttl'}), '(configuration, credentials, default_ttl=ttl)\n', (20338, 20383), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1933, 1962), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (1943, 1962), False, 'from langchain.schema import Generation\n'), ((4464, 4484), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (4471, 4484), False, 'from sqlalchemy.orm import Session\n'), ((5571, 5591), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5578, 5591), False, 'from sqlalchemy.orm import Session\n'), ((5773, 5793), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5780, 5793), False, 'from sqlalchemy.orm import Session\n'), ((9839, 9955), 'langchain.vectorstores.redis.Redis.from_existing_index', 'RedisVectorstore.from_existing_index', ([], {'embedding': 'self.embedding', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding=self.embedding, index_name=\n index_name, redis_url=self.redis_url)\n', (9875, 9955), True, 'from langchain.vectorstores.redis import Redis as 
RedisVectorstore\n'), ((14209, 14251), 'inspect.signature', 'inspect.signature', (['self.init_gptcache_func'], {}), '(self.init_gptcache_func)\n', (14226, 14251), False, 'import inspect\n'), ((16635, 16665), 'typing.cast', 'cast', (['Cache', 'gptcache_instance'], {}), '(Cache, gptcache_instance)\n', (16639, 16665), False, 'from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union, cast\n'), ((17565, 17585), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (17574, 17585), False, 'from datetime import timedelta\n'), ((20128, 20154), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (20152, 20154), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20190, 20238), 'langchain.utils.get_from_env', 'get_from_env', (['"""auth_token"""', '"""MOMENTO_AUTH_TOKEN"""'], {}), "('auth_token', 'MOMENTO_AUTH_TOKEN')\n", (20202, 20238), False, 'from langchain.utils import get_from_env\n'), ((10061, 10178), 'langchain.vectorstores.redis.Redis', 'RedisVectorstore', ([], {'embedding_function': 'self.embedding.embed_query', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding_function=self.embedding.embed_query, index_name=\n index_name, redis_url=self.redis_url)\n', (10077, 10178), True, 'from langchain.vectorstores.redis import Redis as RedisVectorstore\n'), ((15533, 15562), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (15543, 15562), False, 'from langchain.schema import Generation\n'), ((5481, 5491), 'langchain.load.dump.dumps', 'dumps', (['gen'], {}), '(gen)\n', (5486, 5491), False, 'from langchain.load.dump import dumps\n'), ((7268, 7289), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (7278, 7289), False, 'from langchain.schema import Generation\n'), ((14595, 14633), 'gptcache.manager.factory.get_data_manager', 'get_data_manager', ([], {'data_path': 'llm_string'}), '(data_path=llm_string)\n', (14611, 14633), False, 'from gptcache.manager.factory import get_data_manager\n'), ((15586, 15601), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (15596, 15601), False, 'import json\n'), ((4619, 4632), 'langchain.load.load.loads', 'loads', (['row[0]'], {}), '(row[0])\n', (4624, 4632), False, 'from langchain.load.load import loads\n'), ((11414, 11435), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (11424, 11435), False, 'from langchain.schema import Generation\n'), ((5189, 5212), 'langchain.schema.Generation', 'Generation', ([], {'text': 'row[0]'}), '(text=row[0])\n', (5199, 5212), False, 'from langchain.schema import Generation\n'), ((4234, 4268), 'sqlalchemy.select', 'select', (['self.cache_schema.response'], {}), '(self.cache_schema.response)\n', (4240, 4268), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n')] |
# INITIALIZATION
# LangChain imports
import langchain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
# General imports
import os
from dotenv import load_dotenv
# Load API key from .env
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
# LangChain debugging settings
langchain.debug = False
langchain.verbose = False
"""
NOTE
LangChain applications can be debugged easily either
1. by enabling the debug mode or
2. by enabling verbose outputs (recommended for agents).
Turn on (switch to True) one of the above and see what happens when the chain executes.
Reference: https://python.langchain.com/docs/guides/debugging
"""
# MODEL
llm = OpenAI(temperature=0.6)
# PROMPT MANAGEMENT
def generate_recipe_names(selected_items):
"""
Generate a list of recipe names using a list of selected ingredients
by executing an LLMChain.
Args:
selected_items (list): A list of ingredients selected by the user
Returns:
dict: A dictionary of recipe names
"""
# Set up prompt template
prompt_template_recipe_name = PromptTemplate(
input_variables=["ingredients"],
template="Generate a list of meal names that can be prepared using the provided ingredients. "
"Ingredients are {ingredients}. "
"It's not necessary to use all of the ingredients, "
"and the list can include both simple and complex meal names. "
"Please consider the ingredients provided and suggest meal names accordingly.",
)
# Set up chain
recipe_name_chain = LLMChain(
llm=llm, prompt=prompt_template_recipe_name, output_key="recipe_name"
)
# Set up multichain workflow with inputs
chain = SequentialChain(
chains=[recipe_name_chain],
input_variables=["ingredients"],
output_variables=["recipe_name"],
)
# Execute workflow and get response
response = chain({"ingredients": selected_items})
return response
def generate_recipe(recipe_name):
"""
Generate a comprehensive recipe using a name of a recipe as input
by executing an LLMChain
Args:
recipe_name (str): The name of the recipe to be generated
Returns:
dict: A recipe (as a dictionary object)
"""
# Set up prompt template
prompt_template_recipe = PromptTemplate(
input_variables=["recipe_name"],
template="Generate a recipe for {recipe_name}. Please include a list of ingredients and "
"step-by-step instructions for preparing {recipe_name}. "
"Please include the cooking time and any special instructions.",
)
# Set up chain
recipe_chain = LLMChain(llm=llm, prompt=prompt_template_recipe, output_key="recipe")
# Set up multichain workflow with inputs
chain = SequentialChain(
chains=[recipe_chain],
input_variables=["recipe_name"],
output_variables=["recipe"],
)
# Execute workflow and get response
response = chain({"recipe_name": recipe_name})
return response
| [
"langchain.llms.OpenAI",
"langchain.prompts.PromptTemplate",
"langchain.chains.SequentialChain",
"langchain.chains.LLMChain"
] | [((303, 316), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (314, 316), False, 'from dotenv import load_dotenv\n'), ((348, 375), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (357, 375), False, 'import os\n'), ((785, 808), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.6)'}), '(temperature=0.6)\n', (791, 808), False, 'from langchain.llms import OpenAI\n'), ((1201, 1572), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['ingredients']", 'template': '"""Generate a list of meal names that can be prepared using the provided ingredients. Ingredients are {ingredients}. It\'s not necessary to use all of the ingredients, and the list can include both simple and complex meal names. Please consider the ingredients provided and suggest meal names accordingly."""'}), '(input_variables=[\'ingredients\'], template=\n "Generate a list of meal names that can be prepared using the provided ingredients. Ingredients are {ingredients}. It\'s not necessary to use all of the ingredients, and the list can include both simple and complex meal names. Please consider the ingredients provided and suggest meal names accordingly."\n )\n', (1215, 1572), False, 'from langchain.prompts import PromptTemplate\n'), ((1674, 1753), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_recipe_name', 'output_key': '"""recipe_name"""'}), "(llm=llm, prompt=prompt_template_recipe_name, output_key='recipe_name')\n", (1682, 1753), False, 'from langchain.chains import LLMChain\n'), ((1826, 1940), 'langchain.chains.SequentialChain', 'SequentialChain', ([], {'chains': '[recipe_name_chain]', 'input_variables': "['ingredients']", 'output_variables': "['recipe_name']"}), "(chains=[recipe_name_chain], input_variables=['ingredients'],\n output_variables=['recipe_name'])\n", (1841, 1940), False, 'from langchain.chains import SequentialChain\n'), ((2433, 2697), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['recipe_name']", 'template': '"""Generate a recipe for {recipe_name}. Please include a list of ingredients and step-by-step instructions for preparing {recipe_name}. Please include the cooking time and any special instructions."""'}), "(input_variables=['recipe_name'], template=\n 'Generate a recipe for {recipe_name}. Please include a list of ingredients and step-by-step instructions for preparing {recipe_name}. Please include the cooking time and any special instructions.'\n )\n", (2447, 2697), False, 'from langchain.prompts import PromptTemplate\n'), ((2772, 2841), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_recipe', 'output_key': '"""recipe"""'}), "(llm=llm, prompt=prompt_template_recipe, output_key='recipe')\n", (2780, 2841), False, 'from langchain.chains import LLMChain\n'), ((2900, 3004), 'langchain.chains.SequentialChain', 'SequentialChain', ([], {'chains': '[recipe_chain]', 'input_variables': "['recipe_name']", 'output_variables': "['recipe']"}), "(chains=[recipe_chain], input_variables=['recipe_name'],\n output_variables=['recipe'])\n", (2915, 3004), False, 'from langchain.chains import SequentialChain\n')] |
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import DirectoryLoader, TextLoader
import bibtexparser
import langchain
import os
import glob
from dotenv import load_dotenv
import openai
import constants
import time
# Set OpenAI API Key
load_dotenv()
os.environ["OPENAI_API_KEY"] = constants.APIKEY
openai.api_key = constants.APIKEY
# Set paths
source_path = './data/src/'
destination_file = './data/ingested.txt'
store_path = './vectorstore/'
bibtex_file_path = '/home/wouter/Tools/Zotero/bibtex/library.bib'
# Load documents
print("===Loading documents===")
text_loader_kwargs={'autodetect_encoding': True}
loader = DirectoryLoader(source_path,
show_progress=True,
use_multithreading=True,
loader_cls=TextLoader,
loader_kwargs=text_loader_kwargs)
documents = loader.load()
if len(documents) == 0:
print("No new documents found")
quit()
# Add metadata based in bibliographic information
print("===Adding metadata===")
# Read the BibTeX file
with open(bibtex_file_path) as bibtex_file:
bib_database = bibtexparser.load(bibtex_file)
# Get a list of all text file names in the directory
text_file_names = os.listdir(source_path)
metadata_store = []
# Go through each entry in the BibTeX file
for entry in bib_database.entries:
# Check if the 'file' key exists in the entry
if 'file' in entry:
# Extract the file name from the 'file' field and remove the extension
pdf_file_name = os.path.basename(entry['file']).replace('.pdf', '')
# Check if there is a text file with the same name
if f'{pdf_file_name}.txt' in text_file_names:
# If a match is found, append the metadata to the list
metadata_store.append(entry)
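# Attach the matching BibTeX entry (matched by file name) to each document's metadata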
for document in documents:
for entry in metadata_store:
doc_name = os.path.basename(document.metadata['source']).replace('.txt', '')
ent_name = os.path.basename(entry['file']).replace('.pdf', '')
if doc_name == ent_name:
document.metadata.update(entry)
# Initialize text splitter
print("===Splitting documents into chunks===")
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 1500,
chunk_overlap = 150,
length_function = len,
add_start_index = True,
)
split_documents = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings(
show_progress_bar=True,
request_timeout=60,
)
print("===Embedding text and creating database===")
new_db = FAISS.from_documents(split_documents, embeddings)
print("===Merging new and old database===")
old_db = FAISS.load_local(store_path, embeddings)
old_db.merge_from(new_db)
old_db.save_local(store_path, "index")
# Record the files that we have added
print("===Recording ingested files===")
with open(destination_file, 'a') as f:
for document in documents:
f.write(os.path.basename(document.metadata['source']))
f.write('\n')
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.DirectoryLoader",
"langchain.vectorstores.FAISS.from_documents",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.vectorstores.FAISS.load_local"
] | [((380, 393), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (391, 393), False, 'from dotenv import load_dotenv\n'), ((764, 898), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['source_path'], {'show_progress': '(True)', 'use_multithreading': '(True)', 'loader_cls': 'TextLoader', 'loader_kwargs': 'text_loader_kwargs'}), '(source_path, show_progress=True, use_multithreading=True,\n loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n', (779, 898), False, 'from langchain.document_loaders import DirectoryLoader, TextLoader\n'), ((1364, 1387), 'os.listdir', 'os.listdir', (['source_path'], {}), '(source_path)\n', (1374, 1387), False, 'import os\n'), ((2324, 2437), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(150)', 'length_function': 'len', 'add_start_index': '(True)'}), '(chunk_size=1500, chunk_overlap=150,\n length_function=len, add_start_index=True)\n', (2354, 2437), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2536, 2596), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'show_progress_bar': '(True)', 'request_timeout': '(60)'}), '(show_progress_bar=True, request_timeout=60)\n', (2552, 2596), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2670, 2719), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['split_documents', 'embeddings'], {}), '(split_documents, embeddings)\n', (2690, 2719), False, 'from langchain.vectorstores import FAISS\n'), ((2774, 2814), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['store_path', 'embeddings'], {}), '(store_path, embeddings)\n', (2790, 2814), False, 'from langchain.vectorstores import FAISS\n'), ((1261, 1291), 'bibtexparser.load', 'bibtexparser.load', (['bibtex_file'], {}), '(bibtex_file)\n', (1278, 1291), False, 'import bibtexparser\n'), ((3045, 3090), 'os.path.basename', 'os.path.basename', (["document.metadata['source']"], {}), "(document.metadata['source'])\n", (3061, 3090), False, 'import os\n'), ((1664, 1695), 'os.path.basename', 'os.path.basename', (["entry['file']"], {}), "(entry['file'])\n", (1680, 1695), False, 'import os\n'), ((2019, 2064), 'os.path.basename', 'os.path.basename', (["document.metadata['source']"], {}), "(document.metadata['source'])\n", (2035, 2064), False, 'import os\n'), ((2104, 2135), 'os.path.basename', 'os.path.basename', (["entry['file']"], {}), "(entry['file'])\n", (2120, 2135), False, 'import os\n')] |
from llama_index import (
ServiceContext,
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
from tqdm import tqdm
import arxiv
import os
import argparse
import yaml
import qdrant_client
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding
from llama_index import ServiceContext
from llama_index.llms import Ollama
class Data:
def __init__(self, config):
self.config = config
def _create_data_folder(self, download_path):
data_path = download_path
if not os.path.exists(data_path):
            os.makedirs(data_path)
print("Output folder created")
else:
print("Output folder already exists.")
def download_papers(self, search_query, download_path, max_results):
self._create_data_folder(download_path)
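        # Query the arXiv API and download every matching paper as a PDF into the data folder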
client = arxiv.Client()
search = arxiv.Search(
query=search_query,
max_results=max_results,
sort_by=arxiv.SortCriterion.SubmittedDate,
)
results = list(client.results(search))
for paper in tqdm(results):
if os.path.exists(download_path):
paper_title = (paper.title).replace(" ", "_")
paper.download_pdf(dirpath=download_path, filename=f"{paper_title}.pdf")
print(f"{paper.title} Downloaded.")
def ingest(self, embedder, llm):
print("Indexing data...")
documents = SimpleDirectoryReader(self.config["data_path"]).load_data()
client = qdrant_client.QdrantClient(url=self.config["qdrant_url"])
qdrant_vector_store = QdrantVectorStore(
client=client, collection_name=self.config["collection_name"]
)
storage_context = StorageContext.from_defaults(vector_store=qdrant_vector_store)
# service_context = ServiceContext.from_defaults(
# llm=llm, embed_model=embedder, chunk_size=self.config["chunk_size"]
# )
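        # Embedding-only service context: no LLM is needed while indexing documents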
service_context = ServiceContext.from_defaults(
llm=None, embed_model=embedder, chunk_size=self.config["chunk_size"]
)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, service_context=service_context
)
print(
f"Data indexed successfully to Qdrant. Collection: {self.config['collection_name']}"
)
return index
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-q", "--query",
type=str,
default=False,
help="Download papers from arxiv with this query.",
)
# parser.add_argument(
# "-o", "--output", type=str, default=False, help="Download path."
# )
parser.add_argument(
"-m", "--max", type=int, default=False, help="Max results to download."
)
parser.add_argument(
"-i",
"--ingest",
action=argparse.BooleanOptionalAction,
default=False,
help="Ingest data to Qdrant vector Database.",
)
args = parser.parse_args()
config_file = "config.yml"
with open(config_file, "r") as conf:
config = yaml.safe_load(conf)
data = Data(config)
if args.query:
data.download_papers(
search_query=args.query,
download_path=config["data_path"],
max_results=args.max,
)
if args.ingest:
print("Loading Embedder...")
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name=config["embedding_model"])
)
llm = Ollama(model=config["llm_name"], base_url=config["llm_url"])
data.ingest(embedder=embed_model, llm=llm)
| [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((2566, 2591), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2589, 2591), False, 'import argparse\n'), ((970, 984), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (982, 984), False, 'import arxiv\n'), ((1003, 1108), 'arxiv.Search', 'arxiv.Search', ([], {'query': 'search_query', 'max_results': 'max_results', 'sort_by': 'arxiv.SortCriterion.SubmittedDate'}), '(query=search_query, max_results=max_results, sort_by=arxiv.\n SortCriterion.SubmittedDate)\n', (1015, 1108), False, 'import arxiv\n'), ((1220, 1233), 'tqdm.tqdm', 'tqdm', (['results'], {}), '(results)\n', (1224, 1233), False, 'from tqdm import tqdm\n'), ((1654, 1711), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'url': "self.config['qdrant_url']"}), "(url=self.config['qdrant_url'])\n", (1680, 1711), False, 'import qdrant_client\n'), ((1742, 1827), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': "self.config['collection_name']"}), "(client=client, collection_name=self.config['collection_name']\n )\n", (1759, 1827), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((1871, 1933), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'qdrant_vector_store'}), '(vector_store=qdrant_vector_store)\n', (1899, 1933), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n'), ((2112, 2215), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'None', 'embed_model': 'embedder', 'chunk_size': "self.config['chunk_size']"}), "(llm=None, embed_model=embedder, chunk_size=\n self.config['chunk_size'])\n", (2140, 2215), False, 'from llama_index import ServiceContext\n'), ((2250, 2362), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(documents, storage_context=storage_context,\n service_context=service_context)\n', (2281, 2362), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n'), ((3283, 3303), 'yaml.safe_load', 'yaml.safe_load', (['conf'], {}), '(conf)\n', (3297, 3303), False, 'import yaml\n'), ((3700, 3760), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': "config['llm_name']", 'base_url': "config['llm_url']"}), "(model=config['llm_name'], base_url=config['llm_url'])\n", (3706, 3760), False, 'from llama_index.llms import Ollama\n'), ((646, 671), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (660, 671), False, 'import os\n'), ((685, 722), 'os.makedirs', 'os.makedirs', (["self.config['data_path']"], {}), "(self.config['data_path'])\n", (696, 722), False, 'import os\n'), ((1250, 1279), 'os.path.exists', 'os.path.exists', (['download_path'], {}), '(download_path)\n', (1264, 1279), False, 'import os\n'), ((3616, 3675), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': "config['embedding_model']"}), "(model_name=config['embedding_model'])\n", (3637, 3675), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1576, 1623), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (["self.config['data_path']"], {}), "(self.config['data_path'])\n", (1597, 1623), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex\n')] |
import logging
import os
import pickle
import tempfile
import streamlit as st
from dotenv import load_dotenv
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import (HuggingFaceHubEmbeddings,
HuggingFaceInstructEmbeddings)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS, Chroma
from PIL import Image
from langChainInterface import LangChainInterface
# Most GENAI logs are at Debug level.
logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG"))
st.set_page_config(
page_title="Retrieval Augmented Generation",
page_icon="🧊",
layout="wide",
initial_sidebar_state="expanded"
)
st.header("Retrieval Augmented Generation with watsonx.ai 💬")
# chunk_size=1500
# chunk_overlap = 200
load_dotenv()
handler = StdOutCallbackHandler()
api_key = os.getenv("API_KEY", None)
ibm_cloud_url = os.getenv("IBM_CLOUD_URL", None)
project_id = os.getenv("PROJECT_ID", None)
if api_key is None or ibm_cloud_url is None or project_id is None:
print("Ensure you copied the .env file that you created earlier into the same directory as this notebook")
else:
creds = {
"url": ibm_cloud_url,
"apikey": api_key
}
GEN_API_KEY = os.getenv("GENAI_KEY", None)
# Sidebar contents
with st.sidebar:
st.title("RAG App")
st.markdown('''
## About
This app is an LLM-powered RAG built using:
- [IBM Generative AI SDK](https://github.com/IBM/ibm-generative-ai/)
- [HuggingFace](https://huggingface.co/)
- [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM model
''')
st.write('Powered by [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai)')
image = Image.open('watsonxai.jpg')
st.image(image, caption='Powered by watsonx.ai')
max_new_tokens= st.number_input('max_new_tokens',1,1024,value=300)
min_new_tokens= st.number_input('min_new_tokens',0,value=15)
repetition_penalty = st.number_input('repetition_penalty',1,2,value=2)
decoding = st.text_input(
"Decoding",
"greedy",
key="placeholder",
)
uploaded_files = st.file_uploader("Choose a PDF file", accept_multiple_files=True)
@st.cache_data
def read_pdf(uploaded_files,chunk_size =250,chunk_overlap=20):
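    # Save each uploaded PDF to a temporary file, load it, and split it into overlapping chunks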
for uploaded_file in uploaded_files:
bytes_data = uploaded_file.read()
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp_file:
# Write content to the temporary file
temp_file.write(bytes_data)
filepath = temp_file.name
with st.spinner('Waiting for the file to upload'):
loader = PyPDFLoader(filepath)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size= chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(data)
return docs
@st.cache_data
def read_push_embeddings():
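    # Embed the chunks and reuse a pickled FAISS index on disk when one already exists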
embeddings = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-MiniLM-L6-v2")
if os.path.exists("db.pickle"):
with open("db.pickle",'rb') as file_name:
db = pickle.load(file_name)
else:
db = FAISS.from_documents(docs, embeddings)
with open('db.pickle','wb') as file_name :
pickle.dump(db,file_name)
st.write("\n")
return db
# show user input
if user_question := st.text_input(
"Ask a question about your Policy Document:"
):
docs = read_pdf(uploaded_files)
db = read_push_embeddings()
docs = db.similarity_search(user_question)
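    # Text-generation parameters passed to the watsonx.ai model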
params = {
GenParams.DECODING_METHOD: "greedy",
GenParams.MIN_NEW_TOKENS: 30,
GenParams.MAX_NEW_TOKENS: 300,
GenParams.TEMPERATURE: 0.0,
# GenParams.TOP_K: 100,
# GenParams.TOP_P: 1,
GenParams.REPETITION_PENALTY: 1
}
model_llm = LangChainInterface(model=ModelTypes.LLAMA_2_70B_CHAT.value, credentials=creds, params=params, project_id=project_id)
chain = load_qa_chain(model_llm, chain_type="stuff")
response = chain.run(input_documents=docs, question=user_question)
st.text_area(label="Model Response", value=response, height=100)
st.write()
| [
"langchain.embeddings.HuggingFaceHubEmbeddings",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.vectorstores.FAISS.from_documents",
"langchain.chains.question_answering.load_qa_chain",
"langchain.callbacks.StdOutCallbackHandler",
"langchain.document_loaders.PyPDFLoader"
] | [((861, 993), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Retrieval Augmented Generation"""', 'page_icon': '"""🧊"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title='Retrieval Augmented Generation', page_icon=\n '🧊', layout='wide', initial_sidebar_state='expanded')\n", (879, 993), True, 'import streamlit as st\n'), ((1007, 1068), 'streamlit.header', 'st.header', (['"""Retrieval Augmented Generation with watsonx.ai 💬"""'], {}), "('Retrieval Augmented Generation with watsonx.ai 💬')\n", (1016, 1068), True, 'import streamlit as st\n'), ((1110, 1123), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1121, 1123), False, 'from dotenv import load_dotenv\n'), ((1135, 1158), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (1156, 1158), False, 'from langchain.callbacks import StdOutCallbackHandler\n'), ((1170, 1196), 'os.getenv', 'os.getenv', (['"""API_KEY"""', 'None'], {}), "('API_KEY', None)\n", (1179, 1196), False, 'import os\n'), ((1213, 1245), 'os.getenv', 'os.getenv', (['"""IBM_CLOUD_URL"""', 'None'], {}), "('IBM_CLOUD_URL', None)\n", (1222, 1245), False, 'import os\n'), ((1259, 1288), 'os.getenv', 'os.getenv', (['"""PROJECT_ID"""', 'None'], {}), "('PROJECT_ID', None)\n", (1268, 1288), False, 'import os\n'), ((1566, 1594), 'os.getenv', 'os.getenv', (['"""GENAI_KEY"""', 'None'], {}), "('GENAI_KEY', None)\n", (1575, 1594), False, 'import os\n'), ((2468, 2533), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a PDF file"""'], {'accept_multiple_files': '(True)'}), "('Choose a PDF file', accept_multiple_files=True)\n", (2484, 2533), True, 'import streamlit as st\n'), ((1636, 1655), 'streamlit.title', 'st.title', (['"""RAG App"""'], {}), "('RAG App')\n", (1644, 1655), True, 'import streamlit as st\n'), ((1660, 1949), 'streamlit.markdown', 'st.markdown', (['"""\n ## About\n This app is an LLM-powered RAG built using:\n - [IBM Generative AI SDK](https://github.com/IBM/ibm-generative-ai/)\n - [HuggingFace](https://huggingface.co/)\n - [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM model\n \n """'], {}), '(\n """\n ## About\n This app is an LLM-powered RAG built using:\n - [IBM Generative AI SDK](https://github.com/IBM/ibm-generative-ai/)\n - [HuggingFace](https://huggingface.co/)\n - [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai) LLM model\n \n """\n )\n', (1671, 1949), True, 'import streamlit as st\n'), ((1944, 2029), 'streamlit.write', 'st.write', (['"""Powered by [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai)"""'], {}), "('Powered by [IBM watsonx.ai](https://www.ibm.com/products/watsonx-ai)'\n )\n", (1952, 2029), True, 'import streamlit as st\n'), ((2037, 2064), 'PIL.Image.open', 'Image.open', (['"""watsonxai.jpg"""'], {}), "('watsonxai.jpg')\n", (2047, 2064), False, 'from PIL import Image\n'), ((2069, 2117), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Powered by watsonx.ai"""'}), "(image, caption='Powered by watsonx.ai')\n", (2077, 2117), True, 'import streamlit as st\n'), ((2138, 2191), 'streamlit.number_input', 'st.number_input', (['"""max_new_tokens"""', '(1)', '(1024)'], {'value': '(300)'}), "('max_new_tokens', 1, 1024, value=300)\n", (2153, 2191), True, 'import streamlit as st\n'), ((2209, 2255), 'streamlit.number_input', 'st.number_input', (['"""min_new_tokens"""', '(0)'], {'value': '(15)'}), "('min_new_tokens', 0, value=15)\n", (2224, 2255), True, 'import streamlit as st\n'), ((2279, 2331), 'streamlit.number_input', 
'st.number_input', (['"""repetition_penalty"""', '(1)', '(2)'], {'value': '(2)'}), "('repetition_penalty', 1, 2, value=2)\n", (2294, 2331), True, 'import streamlit as st\n'), ((2344, 2398), 'streamlit.text_input', 'st.text_input', (['"""Decoding"""', '"""greedy"""'], {'key': '"""placeholder"""'}), "('Decoding', 'greedy', key='placeholder')\n", (2357, 2398), True, 'import streamlit as st\n'), ((3284, 3358), 'langchain.embeddings.HuggingFaceHubEmbeddings', 'HuggingFaceHubEmbeddings', ([], {'repo_id': '"""sentence-transformers/all-MiniLM-L6-v2"""'}), "(repo_id='sentence-transformers/all-MiniLM-L6-v2')\n", (3308, 3358), False, 'from langchain.embeddings import HuggingFaceHubEmbeddings, HuggingFaceInstructEmbeddings\n'), ((3366, 3393), 'os.path.exists', 'os.path.exists', (['"""db.pickle"""'], {}), "('db.pickle')\n", (3380, 3393), False, 'import os\n'), ((3719, 3778), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your Policy Document:"""'], {}), "('Ask a question about your Policy Document:')\n", (3732, 3778), True, 'import streamlit as st\n'), ((4198, 4319), 'langChainInterface.LangChainInterface', 'LangChainInterface', ([], {'model': 'ModelTypes.LLAMA_2_70B_CHAT.value', 'credentials': 'creds', 'params': 'params', 'project_id': 'project_id'}), '(model=ModelTypes.LLAMA_2_70B_CHAT.value, credentials=\n creds, params=params, project_id=project_id)\n', (4216, 4319), False, 'from langChainInterface import LangChainInterface\n'), ((4327, 4371), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['model_llm'], {'chain_type': '"""stuff"""'}), "(model_llm, chain_type='stuff')\n", (4340, 4371), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((4449, 4513), 'streamlit.text_area', 'st.text_area', ([], {'label': '"""Model Response"""', 'value': 'response', 'height': '(100)'}), "(label='Model Response', value=response, height=100)\n", (4461, 4513), True, 'import streamlit as st\n'), ((4518, 4528), 'streamlit.write', 'st.write', ([], {}), '()\n', (4526, 4528), True, 'import streamlit as st\n'), ((823, 858), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""DEBUG"""'], {}), "('LOGLEVEL', 'DEBUG')\n", (837, 858), False, 'import os\n'), ((3513, 3551), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (3533, 3551), False, 'from langchain.vectorstores import FAISS, Chroma\n'), ((3651, 3665), 'streamlit.write', 'st.write', (['"""\n"""'], {}), "('\\n')\n", (3659, 3665), True, 'import streamlit as st\n'), ((2705, 2757), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""wb"""', 'delete': '(False)'}), "(mode='wb', delete=False)\n", (2732, 2757), False, 'import tempfile\n'), ((3462, 3484), 'pickle.load', 'pickle.load', (['file_name'], {}), '(file_name)\n', (3473, 3484), False, 'import pickle\n'), ((3617, 3643), 'pickle.dump', 'pickle.dump', (['db', 'file_name'], {}), '(db, file_name)\n', (3628, 3643), False, 'import pickle\n'), ((2905, 2949), 'streamlit.spinner', 'st.spinner', (['"""Waiting for the file to upload"""'], {}), "('Waiting for the file to upload')\n", (2915, 2949), True, 'import streamlit as st\n'), ((2973, 2994), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['filepath'], {}), '(filepath)\n', (2984, 2994), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((3058, 3145), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 
'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap)\n', (3088, 3145), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n')] |
import os
import re
from typing import Optional
import langchain
import paperqa
import paperscraper
from langchain import SerpAPIWrapper, OpenAI
from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.tools import BaseTool
from pydantic import validator
from pypdf.errors import PdfReadError
class LitSearch(BaseTool):
name = "LiteratureSearch"
description = (
"Input a specific question, returns an answer from literature search. "
"Do not mention any specific molecule names, but use more general features to formulate your questions."
)
llm: BaseLanguageModel
query_chain: Optional[LLMChain] = None
pdir: str = "query"
searches: int = 2
    verbose: bool = False
docs: Optional[paperqa.Docs] = None
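    # Build a default LLMChain that turns the user's question into keyword searches
    # when no query_chain is provided explicitly.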
@validator("query_chain", always=True)
def init_query_chain(cls, v, values):
if v is None:
search_prompt = langchain.prompts.PromptTemplate(
input_variables=["question", "count"],
template="We want to answer the following question: {question} \n"
"Provide {count} keyword searches (one search per line) "
"that will find papers to help answer the question. "
"Do not use boolean operators. "
"Make some searches broad and some narrow. "
"Do not use boolean operators or quotes.\n\n"
"1. ",
)
llm = OpenAI(temperature=0)
v = LLMChain(llm=llm, prompt=search_prompt)
return v
@validator("pdir", always=True)
def init_pdir(cls, v):
if not os.path.isdir(v):
os.mkdir(v)
return v
def paper_search(self, search):
try:
return paperscraper.search_papers(
search, pdir=self.pdir, batch_size=6, limit=4, verbose=False
)
except KeyError:
return {}
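    # Full pipeline: generate search queries, scrape matching papers, then answer with paperqa.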
def _run(self, query: str) -> str:
if self.verbose:
print("\n\nChoosing search terms\n1. ", end="")
searches = self.query_chain.run(question=query, count=self.searches)
print("")
queries = [s for s in searches.split("\n") if len(s) > 3]
# remove 2., 3. from queries
queries = [re.sub(r"^\d+\.\s*", "", q) for q in queries]
# remove quotes
queries = [re.sub(r"\"", "", q) for q in queries]
papers = {}
for q in queries:
papers.update(self.paper_search(q))
if self.verbose:
print(f"retrieved {len(papers)} papers total")
if len(papers) == 0:
return "Not enough papers found"
if self.docs is None:
self.docs = paperqa.Docs(
llm=self.llm, summary_llm="gpt-3.5-turbo", memory=True
)
not_loaded = 0
for path, data in papers.items():
try:
self.docs.add(path, citation=data["citation"], docname=data["key"])
except (ValueError, PdfReadError):
not_loaded += 1
if not_loaded:
print(f"\nFound {len(papers.items())} papers, couldn't load {not_loaded}")
return self.docs.query(query, length_prompt="about 100 words").answer
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError()
class WebSearch(BaseTool):
name = "WebSearch"
description = (
"Input search query, returns snippets from web search. "
"Prefer LitSearch tool over this tool, except for simple questions."
)
serpapi: SerpAPIWrapper = None
def __init__(self, search_engine="google"):
super(WebSearch, self).__init__()
self.serpapi = SerpAPIWrapper(
serpapi_api_key=os.getenv("SERP_API_KEY"), search_engine=search_engine
)
def _run(self, query: str) -> str:
try:
return self.serpapi.run(query)
        except Exception:
return "No results, try another search"
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError()
| [
"langchain.prompts.PromptTemplate",
"langchain.OpenAI",
"langchain.chains.LLMChain"
] | [((810, 847), 'pydantic.validator', 'validator', (['"""query_chain"""'], {'always': '(True)'}), "('query_chain', always=True)\n", (819, 847), False, 'from pydantic import validator\n'), ((1585, 1615), 'pydantic.validator', 'validator', (['"""pdir"""'], {'always': '(True)'}), "('pdir', always=True)\n", (1594, 1615), False, 'from pydantic import validator\n'), ((940, 1318), 'langchain.prompts.PromptTemplate', 'langchain.prompts.PromptTemplate', ([], {'input_variables': "['question', 'count']", 'template': '"""We want to answer the following question: {question} \nProvide {count} keyword searches (one search per line) that will find papers to help answer the question. Do not use boolean operators. Make some searches broad and some narrow. Do not use boolean operators or quotes.\n\n1. """'}), '(input_variables=[\'question\', \'count\'],\n template=\n """We want to answer the following question: {question} \nProvide {count} keyword searches (one search per line) that will find papers to help answer the question. Do not use boolean operators. Make some searches broad and some narrow. Do not use boolean operators or quotes.\n\n1. """\n )\n', (972, 1318), False, 'import langchain\n'), ((1484, 1505), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1490, 1505), False, 'from langchain import SerpAPIWrapper, OpenAI\n'), ((1522, 1561), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'search_prompt'}), '(llm=llm, prompt=search_prompt)\n', (1530, 1561), False, 'from langchain.chains import LLMChain\n'), ((1658, 1674), 'os.path.isdir', 'os.path.isdir', (['v'], {}), '(v)\n', (1671, 1674), False, 'import os\n'), ((1688, 1699), 'os.mkdir', 'os.mkdir', (['v'], {}), '(v)\n', (1696, 1699), False, 'import os\n'), ((1786, 1878), 'paperscraper.search_papers', 'paperscraper.search_papers', (['search'], {'pdir': 'self.pdir', 'batch_size': '(6)', 'limit': '(4)', 'verbose': '(False)'}), '(search, pdir=self.pdir, batch_size=6, limit=4,\n verbose=False)\n', (1812, 1878), False, 'import paperscraper\n'), ((2294, 2323), 're.sub', 're.sub', (['"""^\\\\d+\\\\.\\\\s*"""', '""""""', 'q'], {}), "('^\\\\d+\\\\.\\\\s*', '', q)\n", (2300, 2323), False, 'import re\n'), ((2383, 2403), 're.sub', 're.sub', (['"""\\\\\\""""', '""""""', 'q'], {}), '(\'\\\\"\', \'\', q)\n', (2389, 2403), False, 'import re\n'), ((2737, 2805), 'paperqa.Docs', 'paperqa.Docs', ([], {'llm': 'self.llm', 'summary_llm': '"""gpt-3.5-turbo"""', 'memory': '(True)'}), "(llm=self.llm, summary_llm='gpt-3.5-turbo', memory=True)\n", (2749, 2805), False, 'import paperqa\n'), ((3810, 3835), 'os.getenv', 'os.getenv', (['"""SERP_API_KEY"""'], {}), "('SERP_API_KEY')\n", (3819, 3835), False, 'import os\n')] |
import sys
import getpass
from dotenv import load_dotenv, dotenv_values
import pandas as pd
from IPython.display import display, Markdown, Latex, HTML, JSON
import langchain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from cmd import PROMPT
import os
from pyexpat.errors import messages
import openai
import tiktoken
sys.path.append(r"/Users/dovcohen/Documents/Projects/AI/NL2SQL")
#from .OpenAI_Func import Num_Tokens_From_String, OpenAI_Embeddings_Cost
from ChatGPT.src.lib.OpenAI_Func import Num_Tokens_From_String, OpenAI_Embeddings_Cost
from ChatGPT.src.lib.DB_Func import run_query
from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost
## Vector Datastore
from ChatGPT.src.lib.lib_OpenAI_Embeddings import VDS, OpenAI_Embeddings
class GenAI_NL2SQL():
def __init__(self, OPENAI_API_KEY, Model, Embedding_Model, Encoding_Base, Max_Tokens, Temperature, \
Token_Cost, DB, MYSQL_User, MYSQL_PWD, VDSDB=None, VDSDB_Filename=None):
self._LLM_Model = Model
self._Embedding_Model = Embedding_Model
self._Encoding_Base = Encoding_Base
self._Max_Tokens = Max_Tokens
self._Temperature = Temperature
self._Token_Cost = Token_Cost
self._OpenAI_API_Key = OPENAI_API_KEY
self._DB = DB
self._MYSQL_Credemtals = {'User':MYSQL_User,'PWD':MYSQL_PWD}
self.Set_OpenAI_API_Key()
if VDSDB is not None:
self._VDSDB = VDSDB
self._VDS = VDS(VDSDB_Filename, Encoding_Base, Embedding_Model, Token_Cost, Max_Tokens)
self._VDS.Load_VDS_DF(Verbose=True)
def Set_OpenAI_API_Key(self):
openai.api_key = self._OpenAI_API_Key
return 0
def Print_Open_AI_Key(self):
print(self._OpenAI_API_Key)
def Print_MySQL_Keys(self):
print(self._MYSQL_Credemtals)
##############################################################################
def Prompt_Question(self, _Prompt_Template_, Inputs):
"""
"""
for i,j in Inputs.items():
Prompt = _Prompt_Template_.replace(i,j)
return Prompt
##############################################################################
def OpenAI_Completion(self, Prompt):
try:
#Make your OpenAI API request here
response = openai.Completion.create(
model=self._LLM_Model,
prompt=Prompt,
max_tokens=self._Max_Tokens,
temperature=self._Temperature,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
except openai.error.APIError as e:
#Handle API error here, e.g. retry or log
print(f"OpenAI API returned an API Error: {e}")
return -1
except openai.error.APIConnectionError as e:
#Handle connection error here
print(f"Failed to connect to OpenAI API: {e}")
return -1
except openai.error.RateLimitError as e:
#Handle rate limit error (we recommend using exponential backoff)
print(f"OpenAI API request exceeded rate limit: {e}")
return -1
return(response)
#############################################################################
def OpenAI_Text_Extraction(self, Response, Type='SQL'):
if Type == 'SQL':
            ## Call prompt that removes extraneous characters from the returned query
Prompt_Template, status = self.Load_Prompt_Template('../prompt_templates/OpenAI_SQL_Extraction.txt')
if status == 0:
Prompt = self.Prompt_Question(Prompt_Template, Inputs={'{RESPONSE}':str(Response)})
Rtn = self.OpenAI_Completion(Prompt)
Txt = str(Rtn['choices'][0]['text'])
elif Type == 'Text':
Txt = str(Response['choices'][0]['text'])
else:
print(f'Type: {Type} is Unsupported ')
Txt = ''
return(Txt)
##############################################################################
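    # Build the natural-language-to-SQL prompt, send it to the LLM, report token costs, and extract the SQL text.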
def Prompt_Query(self, Prompt_Template, Question = '', Verbose=False, Debug=False):
status = 0
df = pd.DataFrame()
# Construct prompt
Prompt = self.Prompt_Question(Prompt_Template,{'{Question}':Question})
# Estimate input prompt cost
Cost, Tokens_Used = Prompt_Cost(Prompt, self._LLM_Model, self._Token_Cost, self._Encoding_Base)
if Verbose:
print('Input')
print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}')
# Send prompt to LLM
Response = self.OpenAI_Completion(Prompt)
if Debug:
print(f'Prompt: \n',Prompt,'\n')
print('Response \n',Response,'\n')
Cost, Tokens_Used = OpenAI_Usage_Cost(Response, self._LLM_Model, self._Token_Cost )
if Verbose:
print('Output')
print(f'Total Cost: {round(Cost,3)} Tokens Used {Tokens_Used}','\n')
# extract query from LLM response
Query = self.OpenAI_Text_Extraction(Response, Type='SQL')
if Verbose:
print(Query)
return Query
##############################################################################
    # Given a single input question, run the entire process
def GPT_Completion(self, Question, Prompt_Template, Correct_Query=False, Correction_Prompt=None, \
Max_Iterations=0,Verbose=False, QueryDB = False, Update_VDS=True):
Correct_Query_Iterations = 0
# calculate Question Embedding vector
Question_Emb = self._VDS.OpenAI_Get_Embedding(Text=Question, Verbose=True)
# Few Shot Prompt - Search VDS for questions that are similar to the question posed
# 11/2/2023: Using Cosine simlarity function
N_Shot_Prompt_Examples = self._VDS.Search_VDS(Question_Emb, Similarity_Func = 'Cosine', Top_N=1)
print(f'N_Shot_Prompt_Examples {N_Shot_Prompt_Examples}')
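        # NOTE: the early return below makes the remaining prompt/query/DB steps unreachable.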
return 0
# Construct prompt
if Verbose:
print('Call Prompt_Query')
Query = self.Prompt_Query(Prompt_Template, Question, Verbose=True)
# Test query the DB -
if QueryDB:
status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals, DB=self._DB)
            # if query was malformed, llm hallucinated for example
if Correct_Query and (status == -5):
while (status == -5) and (Correct_Query_Iterations < Max_Iterations):
Correct_Query_Iterations += 1
print('Attempting to correct query syntax error')
Query = self.Prompt_Query(Correction_Prompt, Question, Verbose)
# Query the DB
status, df = run_query(Query = Query, Credentials = self._MYSQL_Credemtals,\
DB=self._DB)
print('\n',df)
if Update_VDS:
rtn = ''
while rtn not in ('Y','N'):
print(f'Add results to Vector Datastore DB? Y or N')
rtn = input('Prompt> Question: ')
if rtn == 'Y':
self.Insert_VDS(Question=Question, Query=Query, Metadata='', Embedding=Question_Emb)
# Return Query
return Query
##############################################################################
def Load_Prompt_Template(self, File=None):
if File:
try:
with open(File, 'r') as file:
Template = file.read().replace('\n', '')
Status = 0
            except Exception:
print(f'Prompt file {File} load failed ')
Status = -1
return "", Status
return Template, Status
#############################################################################
def LangChain_Initiate_LLM(self, Model='OpenAI'):
if Model=='OpenAI':
self._LLM = OpenAI(temperature=self._Temperature, model_name=self._LLM_Model, \
max_tokens=self._Max_Tokens, openai_api_key=self._OpenAI_API_Key)
return 0
else:
print('Model Unsupported')
return -1
# Langchain Completion
def LangChainCompletion(self, Prompt, Input):
chain = LLMChain(llm=self._LLM, prompt=Prompt)
return chain.run(Input)
| [
"langchain.llms.OpenAI",
"langchain.chains.LLMChain"
] | [((394, 457), 'sys.path.append', 'sys.path.append', (['"""/Users/dovcohen/Documents/Projects/AI/NL2SQL"""'], {}), "('/Users/dovcohen/Documents/Projects/AI/NL2SQL')\n", (409, 457), False, 'import sys\n'), ((4264, 4278), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4276, 4278), True, 'import pandas as pd\n'), ((4452, 4527), 'ChatGPT.src.lib.OpenAI_Func.Prompt_Cost', 'Prompt_Cost', (['Prompt', 'self._LLM_Model', 'self._Token_Cost', 'self._Encoding_Base'], {}), '(Prompt, self._LLM_Model, self._Token_Cost, self._Encoding_Base)\n', (4463, 4527), False, 'from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost\n'), ((4872, 4934), 'ChatGPT.src.lib.OpenAI_Func.OpenAI_Usage_Cost', 'OpenAI_Usage_Cost', (['Response', 'self._LLM_Model', 'self._Token_Cost'], {}), '(Response, self._LLM_Model, self._Token_Cost)\n', (4889, 4934), False, 'from ChatGPT.src.lib.OpenAI_Func import Prompt_Cost, OpenAI_Usage_Cost\n'), ((8369, 8407), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'self._LLM', 'prompt': 'Prompt'}), '(llm=self._LLM, prompt=Prompt)\n', (8377, 8407), False, 'from langchain.chains import LLMChain\n'), ((1546, 1621), 'ChatGPT.src.lib.lib_OpenAI_Embeddings.VDS', 'VDS', (['VDSDB_Filename', 'Encoding_Base', 'Embedding_Model', 'Token_Cost', 'Max_Tokens'], {}), '(VDSDB_Filename, Encoding_Base, Embedding_Model, Token_Cost, Max_Tokens)\n', (1549, 1621), False, 'from ChatGPT.src.lib.lib_OpenAI_Embeddings import VDS, OpenAI_Embeddings\n'), ((2404, 2585), 'openai.Completion.create', 'openai.Completion.create', ([], {'model': 'self._LLM_Model', 'prompt': 'Prompt', 'max_tokens': 'self._Max_Tokens', 'temperature': 'self._Temperature', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)'}), '(model=self._LLM_Model, prompt=Prompt, max_tokens=\n self._Max_Tokens, temperature=self._Temperature, top_p=1,\n frequency_penalty=0, presence_penalty=0)\n', (2428, 2585), False, 'import openai\n'), ((6308, 6379), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB)\n', (6317, 6379), False, 'from ChatGPT.src.lib.DB_Func import run_query\n'), ((8017, 8152), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'self._Temperature', 'model_name': 'self._LLM_Model', 'max_tokens': 'self._Max_Tokens', 'openai_api_key': 'self._OpenAI_API_Key'}), '(temperature=self._Temperature, model_name=self._LLM_Model,\n max_tokens=self._Max_Tokens, openai_api_key=self._OpenAI_API_Key)\n', (8023, 8152), False, 'from langchain.llms import OpenAI\n'), ((6851, 6922), 'ChatGPT.src.lib.DB_Func.run_query', 'run_query', ([], {'Query': 'Query', 'Credentials': 'self._MYSQL_Credemtals', 'DB': 'self._DB'}), '(Query=Query, Credentials=self._MYSQL_Credemtals, DB=self._DB)\n', (6860, 6922), False, 'from ChatGPT.src.lib.DB_Func import run_query\n')] |
# Copyright 2023-2024 ByteBrain AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import langchain
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.schema import Document
from langchain.vectorstores import Weaviate
from weaviate import Client
langchain.verbose = True
texts = [
"Scala is a functional Programming Language",
"I love functional programming",
"fp is too simple an is not hard to understand",
"women must adore their husbands",
"ZIO is a good library for writing fp apps",
"Feminism is the belief that all genders should have equal rights and opportunities.",
"This movement is about making the world a better place for everyone",
"The purpose of ZIO Chat Bot is to provide list of ZIO Projects",
"I've got a cold and I've sore throat",
"ZIO chat bot is an open source project."
]
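# Wrap each snippet in a Document so its index is kept as the "source" metadata.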
docs = [Document(page_content=t, metadata={"source": i}) for i, t in enumerate(texts)]
embeddings: OpenAIEmbeddings = OpenAIEmbeddings()
weaviate_client = Client(url="http://localhost:8080")
vector_store = Weaviate.from_documents(docs, embedding=embeddings, weaviate_url="http://127.0.0.1:8080")
# vector_store = FAISS.from_documents(documents=docs, embedding=embeddings)
retriever = vector_store.as_retriever()
retrievalQA = RetrievalQAWithSourcesChain.from_llm(llm=OpenAI(verbose=True), retriever=retriever)
async def run_qa():
result = await retrievalQA.acall({'question': 'what is the zio chat?'})
print(result)
print("Hello")
if __name__ == "__main__":
import tracemalloc
tracemalloc.start()
asyncio.run(run_qa())
| [
"langchain.vectorstores.Weaviate.from_documents",
"langchain.llms.OpenAI",
"langchain.schema.Document",
"langchain.embeddings.OpenAIEmbeddings"
] | [((1605, 1623), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1621, 1623), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1643, 1678), 'weaviate.Client', 'Client', ([], {'url': '"""http://localhost:8080"""'}), "(url='http://localhost:8080')\n", (1649, 1678), False, 'from weaviate import Client\n'), ((1695, 1789), 'langchain.vectorstores.Weaviate.from_documents', 'Weaviate.from_documents', (['docs'], {'embedding': 'embeddings', 'weaviate_url': '"""http://127.0.0.1:8080"""'}), "(docs, embedding=embeddings, weaviate_url=\n 'http://127.0.0.1:8080')\n", (1718, 1789), False, 'from langchain.vectorstores import Weaviate\n'), ((1494, 1542), 'langchain.schema.Document', 'Document', ([], {'page_content': 't', 'metadata': "{'source': i}"}), "(page_content=t, metadata={'source': i})\n", (1502, 1542), False, 'from langchain.schema import Document\n'), ((2196, 2215), 'tracemalloc.start', 'tracemalloc.start', ([], {}), '()\n', (2213, 2215), False, 'import tracemalloc\n'), ((1961, 1981), 'langchain.llms.OpenAI', 'OpenAI', ([], {'verbose': '(True)'}), '(verbose=True)\n', (1967, 1981), False, 'from langchain.llms import OpenAI\n')] |
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
import langchain
langchain.verbose = True
# loader = DirectoryLoader("../langchain/docs/_build/html/", glob="**/*.html")
loader = DirectoryLoader("../demo/", glob="*.html")
index = VectorstoreIndexCreator().from_loaders([loader])
print("index created")
result = index.query("呪術廻戦の概要を1文で説明してください。")
print(f"result: {result}")
| [
"langchain.indexes.VectorstoreIndexCreator",
"langchain.document_loaders.DirectoryLoader"
] | [((242, 284), 'langchain.document_loaders.DirectoryLoader', 'DirectoryLoader', (['"""../demo/"""'], {'glob': '"""*.html"""'}), "('../demo/', glob='*.html')\n", (257, 284), False, 'from langchain.document_loaders import DirectoryLoader\n'), ((293, 318), 'langchain.indexes.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {}), '()\n', (316, 318), False, 'from langchain.indexes import VectorstoreIndexCreator\n')] |
import streamlit as st
import langchain
from langchain_community.chat_models import ChatOllama
from langchain.cache import InMemoryCache
from dotenv import load_dotenv
from langchain_community.embeddings import OllamaEmbeddings
import os
from PIL import Image
from chroma_main import answer_no_retriever
langchain.llm_cache = InMemoryCache()  # enable in-memory caching of LLM responses
load_dotenv()
CHROMA_DB = "./chroma_db"
MODEL = os.getenv("MODEL", "llama2")
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
llm = ChatOllama(base_url=OLLAMA_BASE_URL, model=MODEL, temperature=0.0)
ollama_embeddings = OllamaEmbeddings(base_url=OLLAMA_BASE_URL, model="codellama")
st.button("clear history", type="primary")
if st.button:
st.session_state.messages = []
if "messages" not in st.session_state:
st.session_state.messages = []
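# Replay the stored chat history on every Streamlit rerun.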
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("What is your query?"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").markdown(prompt)
response = answer_no_retriever(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
# message_placeholder.markdown(response + "▌")
message_placeholder.markdown(response)
st.session_state.messages.append({"role": "assistant", "content": response})
| [
"langchain_community.embeddings.OllamaEmbeddings",
"langchain_community.chat_models.ChatOllama",
"langchain.cache.InMemoryCache"
] | [((324, 339), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (337, 339), False, 'from langchain.cache import InMemoryCache\n'), ((341, 354), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (352, 354), False, 'from dotenv import load_dotenv\n'), ((390, 418), 'os.getenv', 'os.getenv', (['"""MODEL"""', '"""llama2"""'], {}), "('MODEL', 'llama2')\n", (399, 418), False, 'import os\n'), ((438, 492), 'os.getenv', 'os.getenv', (['"""OLLAMA_BASE_URL"""', '"""http://localhost:11434"""'], {}), "('OLLAMA_BASE_URL', 'http://localhost:11434')\n", (447, 492), False, 'import os\n'), ((499, 565), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'base_url': 'OLLAMA_BASE_URL', 'model': 'MODEL', 'temperature': '(0.0)'}), '(base_url=OLLAMA_BASE_URL, model=MODEL, temperature=0.0)\n', (509, 565), False, 'from langchain_community.chat_models import ChatOllama\n'), ((586, 647), 'langchain_community.embeddings.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'base_url': 'OLLAMA_BASE_URL', 'model': '"""codellama"""'}), "(base_url=OLLAMA_BASE_URL, model='codellama')\n", (602, 647), False, 'from langchain_community.embeddings import OllamaEmbeddings\n'), ((651, 693), 'streamlit.button', 'st.button', (['"""clear history"""'], {'type': '"""primary"""'}), "('clear history', type='primary')\n", (660, 693), True, 'import streamlit as st\n'), ((968, 1004), 'streamlit.chat_input', 'st.chat_input', (['"""What is your query?"""'], {}), "('What is your query?')\n", (981, 1004), True, 'import streamlit as st\n'), ((1010, 1079), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1042, 1079), True, 'import streamlit as st\n'), ((1149, 1176), 'chroma_main.answer_no_retriever', 'answer_no_retriever', (['prompt'], {}), '(prompt)\n', (1168, 1176), False, 'from chroma_main import answer_no_retriever\n'), ((1364, 1440), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (1396, 1440), True, 'import streamlit as st\n'), ((879, 911), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (894, 911), True, 'import streamlit as st\n'), ((921, 952), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (932, 952), True, 'import streamlit as st\n'), ((1187, 1215), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (1202, 1215), True, 'import streamlit as st\n'), ((1247, 1257), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (1255, 1257), True, 'import streamlit as st\n'), ((1084, 1107), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (1099, 1107), True, 'import streamlit as st\n')] |
# Copyright 2023-2024 ByteBrain AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import langchain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.schema import Document
from langchain.vectorstores import FAISS
langchain.verbose = True
# langchain.debug=True
import asyncio
from core.utils.upgrade_sqlite import upgrade_sqlite_version
upgrade_sqlite_version()
embeddings: OpenAIEmbeddings = OpenAIEmbeddings()
texts = [
"Scala is a functional Programming Language",
"I love functional programming",
"fp is too simple an is not hard to understand",
"women must adore their husbands",
"ZIO is a good library for writing fp apps",
"Feminism is the belief that all genders should have equal rights and opportunities.",
"This movement is about making the world a better place for everyone",
"The purpose of ZIO Chat Bot is to provide list of ZIO Projects",
"I've got a cold and I've sore throat",
"ZIO chat bot is an open source project."
]
docs = [Document(page_content=t, metadata={"source": i}) for i, t in enumerate(texts)]
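# Build an in-memory FAISS index over the snippets and expose it as a retriever.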
vectorstore = FAISS.from_documents(documents=docs, embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
qa = load_qa_with_sources_chain(llm=OpenAI(), verbose=True)
question = 'what is the zio chat bot?'
async def run_qa():
result = await qa._acall({'question': question, 'input_documents': retriever.get_relevant_documents(question)})
print(result)
print("Hello")
if __name__ == "__main__":
asyncio.run(run_qa(), debug=True)
| [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.llms.OpenAI",
"langchain.schema.Document"
] | [((896, 920), 'core.utils.upgrade_sqlite.upgrade_sqlite_version', 'upgrade_sqlite_version', ([], {}), '()\n', (918, 920), False, 'from core.utils.upgrade_sqlite import upgrade_sqlite_version\n'), ((952, 970), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (968, 970), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1547, 1595), 'langchain.schema.Document', 'Document', ([], {'page_content': 't', 'metadata': "{'source': i}"}), "(page_content=t, metadata={'source': i})\n", (1555, 1595), False, 'from langchain.schema import Document\n'), ((1688, 1706), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1704, 1706), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1865, 1873), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1871, 1873), False, 'from langchain.llms import OpenAI\n')] |
import logging
from dotenv import load_dotenv
from llama_index import VectorStoreIndex
import pandas as pd
from ragas.metrics import answer_relevancy
from ragas.llama_index import evaluate
from ragas.llms import LangchainLLM
from langchain.chat_models import AzureChatOpenAI
from langchain.embeddings import AzureOpenAIEmbeddings
from app.llama_index.vector_store import setup_vector_store
from app.llama_index.llm import setup_service_context
from app.utils.env import get_env_variable
from app.eval.constants import (
DATASET_JSON_PATH,
EVAL_METRICS,
EVAL_VECTOR_STORE_NAME,
SERVICE_CONTEXT_VERSION,
)
from app.eval.dataset_generation import generate_ragas_qr_pairs
def setup_ragas_llm():
load_dotenv()
try:
api_key = get_env_variable("OPENAI_API_KEY")
api_version = get_env_variable("OPENAI_API_VERSION")
deployment_name = get_env_variable("OPENAI_DEPLOYMENT_NAME")
except EnvironmentError as e:
raise e
    azure_model = AzureChatOpenAI(
        deployment_name=deployment_name,
        openai_api_version=api_version,
        openai_api_key=api_key,
        openai_api_type="azure",
    )
logging.info("Azure OpenAI model for Ragas successfully set up.")
return LangchainLLM(azure_model)
def setup_ragas_embeddings():
load_dotenv()
try:
deployment = get_env_variable("OPENAI_DEPLOYMENT_EMBEDDINGS")
api_base = get_env_variable("OPENAI_API_BASE")
api_key = get_env_variable("OPENAI_API_KEY")
api_version = get_env_variable("OPENAI_API_VERSION")
except EnvironmentError as e:
raise e
azure_embeddings = AzureOpenAIEmbeddings(
azure_deployment=deployment,
model="text-embedding-ada-002",
openai_api_type="azure",
openai_api_base=api_base,
openai_api_key=api_key,
openai_api_version=api_version,
)
logging.info("Azure OpenAI Embeddings for Ragas successfully set up.")
return azure_embeddings
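# End-to-end Ragas evaluation: build question/answer pairs, wire up the Azure LLM and
# embeddings, query the existing vector index, score with the Ragas metrics, and save to CSV.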
def run_ragas_evaluation():
eval_questions, eval_answers = generate_ragas_qr_pairs(DATASET_JSON_PATH)
eval_embeddings = setup_ragas_embeddings()
eval_llm = setup_ragas_llm()
eval_vector_store = setup_vector_store(EVAL_VECTOR_STORE_NAME)
eval_service_context = setup_service_context(SERVICE_CONTEXT_VERSION, azure=True)
index = VectorStoreIndex.from_vector_store(
vector_store=eval_vector_store, service_context=eval_service_context
)
query_engine = index.as_query_engine()
logging.info("Ragas evaluation successfully set up.")
metrics = EVAL_METRICS
answer_relevancy.embeddings = eval_embeddings
for m in metrics:
m.__setattr__("llm", eval_llm)
m.__setattr__("embeddings", eval_embeddings)
logging.info("Ragas metrics successfully set up.")
result = evaluate(query_engine, metrics, eval_questions, eval_answers)
logging.info("Ragas evaluation successfully finished.")
df = result.to_pandas()
df.to_csv("app/eval/eval_data/ragas_eval.csv", index=False)
logging.info("Ragas evaluation successfully saved to csv file.")
    eval_df = pd.read_csv("app/eval/eval_data/ragas_eval.csv", sep=",")
    logging.info("Ragas evaluation successfully finished.")
    return eval_df
| [
"langchain.embeddings.AzureOpenAIEmbeddings",
"langchain.chat_models.AzureChatOpenAI"
] | [((718, 731), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (729, 731), False, 'from dotenv import load_dotenv\n'), ((993, 1113), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'deployment_name': 'deployment_name', 'model': 'api_version', 'openai_api_key': 'api_key', 'openai_api_type': '"""azure"""'}), "(deployment_name=deployment_name, model=api_version,\n openai_api_key=api_key, openai_api_type='azure')\n", (1008, 1113), False, 'from langchain.chat_models import AzureChatOpenAI\n'), ((1153, 1218), 'logging.info', 'logging.info', (['"""Azure OpenAI model for Ragas successfully set up."""'], {}), "('Azure OpenAI model for Ragas successfully set up.')\n", (1165, 1218), False, 'import logging\n'), ((1230, 1255), 'ragas.llms.LangchainLLM', 'LangchainLLM', (['azure_model'], {}), '(azure_model)\n', (1242, 1255), False, 'from ragas.llms import LangchainLLM\n'), ((1292, 1305), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1303, 1305), False, 'from dotenv import load_dotenv\n'), ((1628, 1827), 'langchain.embeddings.AzureOpenAIEmbeddings', 'AzureOpenAIEmbeddings', ([], {'azure_deployment': 'deployment', 'model': '"""text-embedding-ada-002"""', 'openai_api_type': '"""azure"""', 'openai_api_base': 'api_base', 'openai_api_key': 'api_key', 'openai_api_version': 'api_version'}), "(azure_deployment=deployment, model=\n 'text-embedding-ada-002', openai_api_type='azure', openai_api_base=\n api_base, openai_api_key=api_key, openai_api_version=api_version)\n", (1649, 1827), False, 'from langchain.embeddings import AzureOpenAIEmbeddings\n'), ((1877, 1947), 'logging.info', 'logging.info', (['"""Azure OpenAI Embeddings for Ragas successfully set up."""'], {}), "('Azure OpenAI Embeddings for Ragas successfully set up.')\n", (1889, 1947), False, 'import logging\n'), ((2041, 2083), 'app.eval.dataset_generation.generate_ragas_qr_pairs', 'generate_ragas_qr_pairs', (['DATASET_JSON_PATH'], {}), '(DATASET_JSON_PATH)\n', (2064, 2083), False, 'from app.eval.dataset_generation import generate_ragas_qr_pairs\n'), ((2188, 2230), 'app.llama_index.vector_store.setup_vector_store', 'setup_vector_store', (['EVAL_VECTOR_STORE_NAME'], {}), '(EVAL_VECTOR_STORE_NAME)\n', (2206, 2230), False, 'from app.llama_index.vector_store import setup_vector_store\n'), ((2258, 2316), 'app.llama_index.llm.setup_service_context', 'setup_service_context', (['SERVICE_CONTEXT_VERSION'], {'azure': '(True)'}), '(SERVICE_CONTEXT_VERSION, azure=True)\n', (2279, 2316), False, 'from app.llama_index.llm import setup_service_context\n'), ((2329, 2437), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'eval_vector_store', 'service_context': 'eval_service_context'}), '(vector_store=eval_vector_store,\n service_context=eval_service_context)\n', (2363, 2437), False, 'from llama_index import VectorStoreIndex\n'), ((2495, 2548), 'logging.info', 'logging.info', (['"""Ragas evaluation successfully set up."""'], {}), "('Ragas evaluation successfully set up.')\n", (2507, 2548), False, 'import logging\n'), ((2745, 2795), 'logging.info', 'logging.info', (['"""Ragas metrics successfully set up."""'], {}), "('Ragas metrics successfully set up.')\n", (2757, 2795), False, 'import logging\n'), ((2809, 2870), 'ragas.llama_index.evaluate', 'evaluate', (['query_engine', 'metrics', 'eval_questions', 'eval_answers'], {}), '(query_engine, metrics, eval_questions, eval_answers)\n', (2817, 2870), False, 'from ragas.llama_index import evaluate\n'), ((2875, 2930), 'logging.info', 
'logging.info', (['"""Ragas evaluation successfully finished."""'], {}), "('Ragas evaluation successfully finished.')\n", (2887, 2930), False, 'import logging\n'), ((3027, 3091), 'logging.info', 'logging.info', (['"""Ragas evaluation successfully saved to csv file."""'], {}), "('Ragas evaluation successfully saved to csv file.')\n", (3039, 3091), False, 'import logging\n'), ((3103, 3160), 'pandas.read_csv', 'pd.read_csv', (['"""app/eval/eval_data/ragas_eval.csv"""'], {'sep': '""","""'}), "('app/eval/eval_data/ragas_eval.csv', sep=',')\n", (3114, 3160), True, 'import pandas as pd\n'), ((3165, 3220), 'logging.info', 'logging.info', (['"""Ragas evaluation successfully finished."""'], {}), "('Ragas evaluation successfully finished.')\n", (3177, 3220), False, 'import logging\n'), ((759, 793), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (775, 793), False, 'from app.utils.env import get_env_variable\n'), ((816, 854), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_VERSION"""'], {}), "('OPENAI_API_VERSION')\n", (832, 854), False, 'from app.utils.env import get_env_variable\n'), ((881, 923), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_DEPLOYMENT_NAME"""'], {}), "('OPENAI_DEPLOYMENT_NAME')\n", (897, 923), False, 'from app.utils.env import get_env_variable\n'), ((1336, 1384), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_DEPLOYMENT_EMBEDDINGS"""'], {}), "('OPENAI_DEPLOYMENT_EMBEDDINGS')\n", (1352, 1384), False, 'from app.utils.env import get_env_variable\n'), ((1404, 1439), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_BASE"""'], {}), "('OPENAI_API_BASE')\n", (1420, 1439), False, 'from app.utils.env import get_env_variable\n'), ((1458, 1492), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1474, 1492), False, 'from app.utils.env import get_env_variable\n'), ((1515, 1553), 'app.utils.env.get_env_variable', 'get_env_variable', (['"""OPENAI_API_VERSION"""'], {}), "('OPENAI_API_VERSION')\n", (1531, 1553), False, 'from app.utils.env import get_env_variable\n')] |
import os
import uuid
import langchain
import requests
import streamlit as st
from dotenv import load_dotenv, find_dotenv
from langchain_community.callbacks import get_openai_callback
from langchain.schema import HumanMessage, AIMessage
from playsound import playsound
from streamlit_chat import message
from advisor.agents import init_convo_agent
langchain.debug = True
def init():
load_dotenv(find_dotenv())
st.set_page_config(
page_title="Your Restaurant Advisor",
page_icon="👩🍳",
)
st.header("Your Restaurant Advisor 👩🍳")
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
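# Create the conversational agent once per browser session and keep it in st.session_state.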
def setup_agent():
if 'agent' not in st.session_state:
random_session_id = str(uuid.uuid4())
st.session_state.agent = init_convo_agent(random_session_id)
def get_response_from_ai(human_input):
setup_agent()
print("="*20)
with get_openai_callback() as cb:
result = st.session_state.agent.run(human_input)
print("Cost:", cb)
return result
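# Convert the reply to speech via the ElevenLabs text-to-speech API and play it locally.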
def get_voice_message(message):
payload = {
"text": message,
"model_id": "eleven_monolingual_v1",
"voice_settings": {
"stability": 0,
"similarity_boost": 0,
}
}
headers = {
"accept": "audio/mpeg",
"xi-api-key": os.getenv("ELEVEN_LABS_API_KEY"),
"Content-Type": "application/json"
}
response = requests.post('https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM?optimize_streaming_latency=0', json=payload, headers=headers)
if response.status_code == 200 and response.content:
with open("audio.mp3", "wb") as f:
f.write(response.content)
playsound("audio.mp3")
return response.content
def main():
init()
with st.sidebar:
user_input = st.text_input("your message", value="")
if "messages" not in st.session_state:
st.session_state.messages = []
if user_input:
st.session_state.messages.append(HumanMessage(content=user_input))
with st.spinner("Thinking..."):
response = get_response_from_ai(user_input)
# get_voice_message(response)
st.session_state.messages.append(AIMessage(content=response))
messages = st.session_state.get('messages', [])
for i, msg in enumerate(messages):
if i % 2 == 0:
message(msg.content, is_user=True, avatar_style="thumbs", key=str(i) + "_user")
else:
message(msg.content, is_user=False, avatar_style="avataaars", key=str(i) + "_ai")
if __name__ == "__main__":
main()
| [
"langchain.schema.AIMessage",
"langchain.schema.HumanMessage",
"langchain_community.callbacks.get_openai_callback"
] | [((424, 502), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Your Restaurant Advisor"""', 'page_icon': '"""👩\u200d🍳"""'}), "(page_title='Your Restaurant Advisor', page_icon='👩\\u200d🍳')\n", (442, 502), True, 'import streamlit as st\n'), ((525, 570), 'streamlit.header', 'st.header', (['"""Your Restaurant Advisor 👩\u200d🍳"""'], {}), "('Your Restaurant Advisor 👩\\u200d🍳')\n", (534, 570), True, 'import streamlit as st\n'), ((744, 801), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (755, 801), True, 'import streamlit as st\n'), ((1596, 1747), 'requests.post', 'requests.post', (['"""https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM?optimize_streaming_latency=0"""'], {'json': 'payload', 'headers': 'headers'}), "(\n 'https://api.elevenlabs.io/v1/text-to-speech/21m00Tcm4TlvDq8ikWAM?optimize_streaming_latency=0'\n , json=payload, headers=headers)\n", (1609, 1747), False, 'import requests\n'), ((2453, 2489), 'streamlit.session_state.get', 'st.session_state.get', (['"""messages"""', '[]'], {}), "('messages', [])\n", (2473, 2489), True, 'import streamlit as st\n'), ((404, 417), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (415, 417), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((942, 977), 'advisor.agents.init_convo_agent', 'init_convo_agent', (['random_session_id'], {}), '(random_session_id)\n', (958, 977), False, 'from advisor.agents import init_convo_agent\n'), ((1064, 1085), 'langchain_community.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1083, 1085), False, 'from langchain_community.callbacks import get_openai_callback\n'), ((1110, 1149), 'streamlit.session_state.agent.run', 'st.session_state.agent.run', (['human_input'], {}), '(human_input)\n', (1136, 1149), True, 'import streamlit as st\n'), ((1497, 1529), 'os.getenv', 'os.getenv', (['"""ELEVEN_LABS_API_KEY"""'], {}), "('ELEVEN_LABS_API_KEY')\n", (1506, 1529), False, 'import os\n'), ((1884, 1906), 'playsound.playsound', 'playsound', (['"""audio.mp3"""'], {}), "('audio.mp3')\n", (1893, 1906), False, 'from playsound import playsound\n'), ((2007, 2046), 'streamlit.text_input', 'st.text_input', (['"""your message"""'], {'value': '""""""'}), "('your message', value='')\n", (2020, 2046), True, 'import streamlit as st\n'), ((895, 907), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (905, 907), False, 'import uuid\n'), ((2191, 2223), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (2203, 2223), False, 'from langchain.schema import HumanMessage, AIMessage\n'), ((2238, 2263), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2248, 2263), True, 'import streamlit as st\n'), ((2408, 2435), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'response'}), '(content=response)\n', (2417, 2435), False, 'from langchain.schema import HumanMessage, AIMessage\n')] |
import os.path
import chromadb
import langchain.embeddings
import win32com.client
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
import json
from os.path import isdir, isfile, join
from os import listdir
import openai
from optiondata import Option_data
from signalManager import SignalManager
loaded = False
dbFolder = './vectorstore/'
workspace = './workspace/'
metadatas = dict()
chroma: Chroma = None  # initialised by createDB() / loadDB()
option_data = Option_data()
openai.api_key = option_data.openai_api_key
option_data.optionSignals.changed_checked_api.connect(lambda: reloadDB())
def getExtension(fname: str) -> str:
spl = fname.split('.')
if len(spl) == 1:
return ''
return spl[-1]
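# Split a single .txt or .pdf file into ~400-character chunks and append them to `documents`.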
def processFile(rootpath, path, fname, documents):
ext = getExtension(fname)
allPath = join(rootpath, path)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=400,
chunk_overlap=0,
separators=['\n\n', '\n', ' ', '']
)
if ext == 'txt':
loader = TextLoader(join(allPath, fname), encoding='utf8')
document = loader.load()
print('loaded file {0} with TextLoader.'.format(fname))
elif ext == 'pdf':
loader = UnstructuredPDFLoader(join(allPath, fname))
document = loader.load()
print('loaded file {0} with UnstructuredPDFLoader.'.format(fname))
else:
print("can't process file {0}".format(fname))
return
documents.extend(text_splitter.split_documents(document))
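# Decide whether a file is new, modified, or unchanged, and (re)index it accordingly.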
def CheckFile(rootpath, path, fname):
metaResult = checkMetadata(rootpath, path, fname)
if metaResult == 2: # New File
print('file: {0} has added to workspace'.format(fname))
createNewDocument(rootpath, path, fname)
elif metaResult == 1: # File modified
print('file: {0} needs to be updated'.format(fname))
updateDocument(rootpath, path, fname)
else: # Recent file
print('file: {0} is up to date'.format(fname))
def iterateDirectory(rootpath, path):
allPath = join(rootpath, path)
dirs = [d for d in listdir(allPath) if isdir(join(allPath, d))]
files = [f for f in listdir(allPath) if isfile(join(allPath, f))]
for d in dirs:
iterateDirectory(rootpath, join(path, d))
for f in files:
CheckFile(rootpath, path, f)
def get_file_metadata(path, filename):
return os.path.getmtime(join(os.path.abspath(path), filename))
def create_or_update_metadata(workspacePath, filePath, fileName, docID, idxNum):
global metadatas
path = join(workspacePath, filePath)
meta = {'path': join(filePath, fileName),
'modified': get_file_metadata(path, fileName),
'docID': docID,
'idxNum': idxNum}
metadatas['files'][join(filePath, fileName)] = meta
def checkMetadata(workspacePath, filePath, fileName) -> int: # 0 : same, 1 : not same, 2 : not found
global metadatas
file = join(filePath, fileName)
if file not in metadatas['files']:
return 2
modified_origin = metadatas['files'][file]['modified']
path = join(workspacePath, filePath)
modified = get_file_metadata(path, fileName)
if modified == modified_origin:
return 0
else:
return 1
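# Chunk a newly added file, add it to the Chroma index, and record its doc id and chunk count in the metadata.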
def createNewDocument(workspacePath, filePath, fileName):
global chroma
global metadatas
idx = metadatas['lastID']
metadatas['lastID'] = metadatas['lastID'] + 1
docs = []
processFile(workspacePath, filePath, fileName, docs)
ids = []
for i in range(len(docs)):
ids.append('{0}d{1}'.format(idx, i))
embedding = OpenAIEmbeddings()
if len(docs) != 0:
chroma.add_documents(documents=docs, ids=ids)
create_or_update_metadata(workspacePath, filePath, fileName, idx, len(docs))
def updateDocument(workspacePath, filePath, fileName):
global chroma
global metadatas
file = join(filePath, fileName)
docs = []
processFile(workspacePath, filePath, fileName, docs)
idx = metadatas['files'][file]['docID']
idNum = metadatas['files'][file]['idxNum']
coll = chroma._client.get_collection('langchain')
ids = []
newIds = []
for i in range(idNum):
ids.append('{0}d{1}'.format(idx, i))
for i in range(len(docs)):
newIds.append('{0}d{1}'.format(idx, i))
coll.delete(ids=ids)
embedding = OpenAIEmbeddings()
chroma.add_documents(documents=docs, ids=newIds)
create_or_update_metadata(workspacePath, filePath, fileName, idx, len(docs))
def initMetadata():
global metadatas
metadatas['files'] = dict()
metadatas['lastID'] = 0
saveMetadata(dbFolder + '/metadata.json')
def saveMetadata(path):
global metadatas
with open(path, "w") as f:
json.dump(metadatas, f,
indent=4)
def loadMetadata(path):
global metadatas
with open(path, "r") as f:
metadatas = json.load(f)
f.close()
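# Build a fresh Chroma store from everything under ./workspace and persist it to disk.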
def createDB():
global chroma
from chromadb.config import Settings
embedding = OpenAIEmbeddings()
chroma = Chroma(
persist_directory=dbFolder,
embedding_function=embedding
)
initMetadata()
iterateDirectory(workspace, '')
saveMetadata(dbFolder + 'metadata.json')
chroma.persist()
def loadDB():
global loaded
if loaded:
return
global chroma
try:
embedding = OpenAIEmbeddings()
chroma = Chroma(persist_directory=dbFolder, embedding_function=embedding)
loadMetadata(join(dbFolder, 'metadata.json'))
iterateDirectory(workspace, '')
saveMetadata(join(dbFolder, 'metadata.json'))
chroma.persist()
loaded = True
except:
print('failed to loadDB')
def reloadDB():
print('reloading DB')
option_data.load_option()
openai.api_key = option_data.openai_api_key
loadDB()
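# Answer a question with a RetrievalQA chain over the persisted Chroma store.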
def promptLangchain(query):
global chroma
if chroma is None:
print("chroma didn't set")
return 'err'
retriever = chroma.as_retriever()
    llm = OpenAI()
    llm.max_tokens = 256
qa = RetrievalQA.from_chain_type(
llm=ChatOpenAI(temperature=option_data.temperature),
chain_type='stuff',
retriever=retriever
)
return qa.run(query)
| [
"langchain.chat_models.ChatOpenAI",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.vectorstores.Chroma"
] | [((887, 900), 'optiondata.Option_data', 'Option_data', ([], {}), '()\n', (898, 900), False, 'from optiondata import Option_data\n'), ((1242, 1262), 'os.path.join', 'join', (['rootpath', 'path'], {}), '(rootpath, path)\n', (1246, 1262), False, 'from os.path import isdir, isfile, join\n'), ((1284, 1388), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(400)', 'chunk_overlap': '(0)', 'separators': "['\\n\\n', '\\n', ' ', '']"}), "(chunk_size=400, chunk_overlap=0, separators=\n ['\\n\\n', '\\n', ' ', ''])\n", (1314, 1388), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((2460, 2480), 'os.path.join', 'join', (['rootpath', 'path'], {}), '(rootpath, path)\n', (2464, 2480), False, 'from os.path import isdir, isfile, join\n'), ((2969, 2998), 'os.path.join', 'join', (['workspacePath', 'filePath'], {}), '(workspacePath, filePath)\n', (2973, 2998), False, 'from os.path import isdir, isfile, join\n'), ((3354, 3378), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (3358, 3378), False, 'from os.path import isdir, isfile, join\n'), ((3506, 3535), 'os.path.join', 'join', (['workspacePath', 'filePath'], {}), '(workspacePath, filePath)\n', (3510, 3535), False, 'from os.path import isdir, isfile, join\n'), ((4021, 4039), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4037, 4039), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((4309, 4333), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (4313, 4333), False, 'from os.path import isdir, isfile, join\n'), ((4771, 4789), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (4787, 4789), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5433, 5451), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5449, 5451), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5465, 5529), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'dbFolder', 'embedding_function': 'embedding'}), '(persist_directory=dbFolder, embedding_function=embedding)\n', (5471, 5529), False, 'from langchain.vectorstores import Chroma\n'), ((6439, 6447), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (6445, 6447), False, 'from langchain.llms import OpenAI\n'), ((3019, 3043), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (3023, 3043), False, 'from os.path import isdir, isfile, join\n'), ((3185, 3209), 'os.path.join', 'join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (3189, 3209), False, 'from os.path import isdir, isfile, join\n'), ((5159, 5192), 'json.dump', 'json.dump', (['metadatas', 'f'], {'indent': '(4)'}), '(metadatas, f, indent=4)\n', (5168, 5192), False, 'import json\n'), ((5309, 5321), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5318, 5321), False, 'import json\n'), ((5785, 5803), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5801, 5803), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5821, 5885), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': 'dbFolder', 'embedding_function': 'embedding'}), '(persist_directory=dbFolder, embedding_function=embedding)\n', (5827, 5885), False, 'from langchain.vectorstores import Chroma\n'), ((1464, 1484), 'os.path.join', 'join', (['allPath', 'fname'], {}), '(allPath, fname)\n', (1468, 
1484), False, 'from os.path import isdir, isfile, join\n'), ((2504, 2520), 'os.listdir', 'listdir', (['allPath'], {}), '(allPath)\n', (2511, 2520), False, 'from os import listdir\n'), ((2573, 2589), 'os.listdir', 'listdir', (['allPath'], {}), '(allPath)\n', (2580, 2589), False, 'from os import listdir\n'), ((2673, 2686), 'os.path.join', 'join', (['path', 'd'], {}), '(path, d)\n', (2677, 2686), False, 'from os.path import isdir, isfile, join\n'), ((5907, 5938), 'os.path.join', 'join', (['dbFolder', '"""metadata.json"""'], {}), "(dbFolder, 'metadata.json')\n", (5911, 5938), False, 'from os.path import isdir, isfile, join\n'), ((6001, 6032), 'os.path.join', 'join', (['dbFolder', '"""metadata.json"""'], {}), "(dbFolder, 'metadata.json')\n", (6005, 6032), False, 'from os.path import isdir, isfile, join\n'), ((6526, 6573), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'option_data.temperature'}), '(temperature=option_data.temperature)\n', (6536, 6573), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1662, 1682), 'os.path.join', 'join', (['allPath', 'fname'], {}), '(allPath, fname)\n', (1666, 1682), False, 'from os.path import isdir, isfile, join\n'), ((2530, 2546), 'os.path.join', 'join', (['allPath', 'd'], {}), '(allPath, d)\n', (2534, 2546), False, 'from os.path import isdir, isfile, join\n'), ((2600, 2616), 'os.path.join', 'join', (['allPath', 'f'], {}), '(allPath, f)\n', (2604, 2616), False, 'from os.path import isdir, isfile, join\n')] |
"""
A script for retrieval-based question answering using the langchain library.
This script demonstrates how to integrate a retrieval system with a chat model for answering questions.
It utilizes Chroma for retrieval of relevant information and ChatOpenAI for
generating answers based on the retrieved content.
The RedundantFilterRetriever is used for efficient retrieval,
filtering out redundant information and focusing on the most relevant content.
This setup is ideal for answering questions with context from a specific knowledge base.
Features:
- Initialize ChatOpenAI for language model-based interactions.
- Use OpenAI embeddings for document retrieval.
- Load a Chroma database for document retrieval based on embeddings.
- Use RedundantFilterRetriever for enhanced retrieval efficiency.
- Set up a RetrievalQA chain combining the chat model and the retriever.
- Answer a specific question using the RetrievalQA chain.
Usage:
Run the script to ask a question about the English language and get an answer based on
retrieved content from the Chroma database and processed through the RedundantFilterRetriever.
"""
import langchain
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.chroma import Chroma
from mod_05_redundant_filter_retriever import RedundantFilterRetriever
langchain.debug = True
load_dotenv()
# Initialize a ChatOpenAI instance for language model interactions.
chat = ChatOpenAI()
# Initialize OpenAI embeddings for document retrieval.
embeddings = OpenAIEmbeddings()
# Load a Chroma database for document retrieval.
db = Chroma(persist_directory="emb", embedding_function=embeddings)
# Initialize the RedundantFilterRetriever with OpenAI embeddings and Chroma database.
# This retriever filters out redundant information, focusing on the most relevant content.
retriever = RedundantFilterRetriever(embeddings=embeddings, chroma=db)
# Configure the RetrievalQA chain with the chat model and the enhanced retriever.
# This chain combines the capabilities of ChatOpenAI and
# RedundantFilterRetriever for efficient question answering.
# https://python.langchain.com/docs/modules/chains/document/
chain = RetrievalQA.from_chain_type(llm=chat, retriever=retriever, chain_type="stuff")
# Run the chain to answer a question based on retrieved content.
result = chain.run("What is an interesting fact about the English language?")
print(result)
| [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.vectorstores.chroma.Chroma",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.chat_models.ChatOpenAI"
] | [((1459, 1472), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1470, 1472), False, 'from dotenv import load_dotenv\n'), ((1549, 1561), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1559, 1561), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1631, 1649), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1647, 1649), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1705, 1767), 'langchain.vectorstores.chroma.Chroma', 'Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (1711, 1767), False, 'from langchain.vectorstores.chroma import Chroma\n'), ((1958, 2016), 'mod_05_redundant_filter_retriever.RedundantFilterRetriever', 'RedundantFilterRetriever', ([], {'embeddings': 'embeddings', 'chroma': 'db'}), '(embeddings=embeddings, chroma=db)\n', (1982, 2016), False, 'from mod_05_redundant_filter_retriever import RedundantFilterRetriever\n'), ((2287, 2365), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (2314, 2365), False, 'from langchain.chains import RetrievalQA\n')] |
import os
import gradio as gr
import langchain
from langchain.llms import OpenAI
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import UnstructuredURLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from dotenv import load_dotenv
load_dotenv() # take .env variables
llm = OpenAI(temperature=0.9, max_tokens=500)
def echo(message, history, links):
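    """
    Gradio chat handler: load the given news URLs, split them into chunks,
    index the chunks in a FAISS vector store, and answer the user's question
    with sources via RetrievalQAWithSourcesChain.
    """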
question = message
urls = links.split()
loader = UnstructuredURLLoader(urls)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
docs = text_splitter.split_documents(data)
vectorindex_openai = FAISS.from_documents(docs, OpenAIEmbeddings())
chain = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=vectorindex_openai.as_retriever())
response = chain({"question": question}, return_only_outputs=True)
formatted_response = response['answer']
if 'sources' in response:
formatted_response += "\nSources: " + response['sources']
return formatted_response
demo = gr.ChatInterface(echo,
additional_inputs=[
gr.Textbox("[Paste Links Here]", label="News Links"),
]
)
if __name__ == "__main__":
demo.launch(show_api=False)
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.document_loaders.UnstructuredURLLoader",
"langchain.embeddings.OpenAIEmbeddings"
] | [((470, 483), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (481, 483), False, 'from dotenv import load_dotenv\n'), ((512, 551), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)', 'max_tokens': '(500)'}), '(temperature=0.9, max_tokens=500)\n', (518, 551), False, 'from langchain.llms import OpenAI\n'), ((649, 676), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', (['urls'], {}), '(urls)\n', (670, 676), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((727, 793), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (757, 793), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((898, 916), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (914, 916), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1368, 1420), 'gradio.Textbox', 'gr.Textbox', (['"""[Paste Links Here]"""'], {'label': '"""News Links"""'}), "('[Paste Links Here]', label='News Links')\n", (1378, 1420), True, 'import gradio as gr\n')] |
import json
import random
import langchain
from dotenv import load_dotenv
import gradio as gr
import logging
from langchain.chains import LLMChain
from langchain.prompts.chat import (
ChatPromptTemplate
)
import pydantic.v1.error_wrappers
from typing import Any, Dict, Tuple
from transist.llm import create_openai_llm, parse_json_maybe_invalid, ExtractionOutputParser
from transist.prompt import system_prompt, draft_question_prompt, extract_facts_prompt
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
thinking = [
"Give me a few seconds to understand what you told me.",
"Let me take a moment to process the information you've shared",
"Please allow me a short pause to fully comprehend the details you provided."
]
sufficient_facts_response = "Sufficient facts to generate section {section}"
move_to_next_section = "Let's proceed by moving on to the next section about {section}"
class CarbonAssistant(object):
section_order = [
(0, "info"),
(2, "methodology"),
(3, "quantification"),
(4, "monitoring"),
(5, "safeguards"),
(1, "details"),
(99, "closing")
]
def __init__(self):
self.state = "extract"
self.sector = "afolu"
self.extract_parser = ExtractionOutputParser()
self.curr_section_index = 0
self.curr_section_facts: Dict = self._load_section_facts(self.curr_section_index)
self.completed_section: Dict[Tuple, Dict] = {}
self.curr_questions = []
def _load_section_facts(self, section_index):
section_template = self._section_template(section_index)
return json.loads(section_template)
def _section_template(self, section_index):
section_number, section = CarbonAssistant.section_order[section_index]
section_dir = f"{section_number:02d}_{section}"
section_file = f"{section_number:02d}_{self.sector}_{section}.json"
filepath = f"data/templates/sector={self.sector}/{section_dir}/{section_file}"
log.info("Getting template for %s from file: %s", section, filepath)
return open(filepath, "r").read()
@property
def curr_section(self):
return CarbonAssistant.section_order[self.curr_section_index][1]
def design(self, message, history, openai_api_key=None):
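        """
        Gradio chat handler: build an LLM from the supplied OpenAI API key and
        stream the assistant's responses from design_with_llm, surfacing a
        missing API key as a gr.Error message.
        """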
try:
llm = create_openai_llm(openai_api_key)
for out in self.design_with_llm(llm, message, history):
yield out
except pydantic.v1.error_wrappers.ValidationError as e:
if any(["OPENAI_API_KEY" in error['msg']for error in e.errors()]):
raise gr.Error("An OpenAI API key needs to be provided in the Additional Inputs section below")
else:
raise gr.Error(pydantic.v1.error_wrappers.display_errors(e.errors()))
def design_with_llm(self, llm, message, history):
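        """
        Core state machine: "extract" pulls facts from the user's message,
        "draft" asks the LLM for follow-up questions (or completes the section
        once it answers GENERATE), "explore" looks up helpful context, and
        "generate" is reached when every section is complete.
        """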
if self.state == "draft":
questions = self.draft_questions(llm, self.curr_section_facts)
if self.sufficient_to_generate(questions):
                yield sufficient_facts_response.format(section=self.curr_section)
self.complete_section()
if not self.next_section():
self.state = "generate"
yield "Generating document sections"
else:
self.state = "draft"
                    yield move_to_next_section.format(section=self.curr_section)
for out in self.design_with_llm(llm, message, history):
yield out
else:
self.curr_questions = questions
self.state = "extract"
yield "Let's continue gathering information about your carbon project"
yield questions
elif self.state == "extract":
yield f"Thank you for providing information about your project. {random.choice(thinking)}"
extracted = self.extract_facts(llm, message, history, self.curr_section_facts)
if extracted.get("keys_updated", []):
extracted_facts = extracted.get("extracted_project_facts", {})
self.curr_section_facts.update(extracted_facts)
log.info("Updated facts doc: %s", self.curr_section_facts)
self.state = "draft"
else:
self.state = "explore"
for out in self.design_with_llm(llm, message, history):
yield out
elif self.state == "explore":
yield "I understand that you need some help in answering these questions."
yield "Give me a moment to try and find some relevant information which can help."
explore_results = self.explore(llm, message, history, self.curr_section_facts)
self.state = "extract"
yield explore_results
def draft_questions(self, llm, facts_document):
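        """
        Ask the LLM to draft the next questions for the facts still missing
        from the section template; the prompt is expected to answer "GENERATE"
        once the facts are sufficient (see sufficient_to_generate).
        """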
questions_chain = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_messages([system_prompt, draft_question_prompt]),
output_key="questions",
verbose=True)
questions = questions_chain.predict(json_template=json.dumps(facts_document))
return questions
def extract_facts(self, llm, message, history, facts_document) -> Dict[Any, Any]:
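        """
        Run the extraction chain over the user's message and return a dict
        containing "extracted_project_facts" and the "keys_updated" in the
        facts document.
        """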
extract_chain = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_messages([system_prompt, extract_facts_prompt]),
output_parser=self.extract_parser,
output_key="extracted",
verbose=True)
extracted: Dict[str, Any] = extract_chain.predict_and_parse(
project_facts_document=json.dumps(facts_document),
project_information=message)
if not extracted:
log.warning("Could not extracted using extract chain: '%s'", extracted)
return extracted
def explore(self, llm, message, history, facts_document):
return f"""Some relevant search results to\n\nUser: {message}
In context of \nhistory:
{history}"""
@staticmethod
def sufficient_to_generate(drafted_questions) -> bool:
return drafted_questions.strip() == "GENERATE"
def complete_section(self):
self.curr_questions = []
curr_section = CarbonAssistant.section_order[self.curr_section_index]
if curr_section in self.completed_section:
completed_facts = self.completed_section.get(curr_section)
completed_facts.update(self.curr_section_facts)
else:
self.completed_section[curr_section] = self.curr_section_facts
def next_section(self) -> bool:
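        """
        Advance to the next section template, carrying the shared project info
        facts forward; return False when all sections have been processed.
        """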
if self.curr_section_index + 1 >= len(CarbonAssistant.section_order):
self.curr_section_facts = {}
return False
else:
            assert (0, "info") in self.completed_section, \
"Cannot move to next section without completing project info"
self.curr_section_index += 1
self.curr_section_facts = self._load_section_facts(self.curr_section_index)
project_info_facts = self.completed_section[(0, "info")]
self.curr_section_facts.update(project_info_facts)
return True
def main():
langchain.verbose = True
assistant = CarbonAssistant()
    openai_api_key = gr.Textbox(placeholder="Please enter your OpenAI API key here",
label="Open AI API Key", render=False)
demo = gr.ChatInterface(
title="Verra Carbon Project Design Assistant",
description="""
I'm a virtual assistant who can help you in writing the baseline section for
your carbon project to be registered with the Verra registry. Please start by
telling me something about your project.
""",
textbox=gr.Textbox(placeholder="Start by telling me about your project",
scale=7),
fn=assistant.design,
additional_inputs=[openai_api_key],
examples=[["The name of my project is BrewHat Bunguluru Waste Management", None],
["My project falls under the Waste Management sectoral scope", None],
["My project is about reducing GHG emission from biomass waste", None]]
)
demo.queue().launch()
if __name__ == "__main__":
# Take environment variables from .env file
load_dotenv()
main()
| [
"langchain.prompts.chat.ChatPromptTemplate.from_messages"
] | [((462, 501), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (481, 501), False, 'import logging\n'), ((508, 535), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (525, 535), False, 'import logging\n'), ((7347, 7453), 'gradio.Textbox', 'gr.Textbox', ([], {'placeholder': '"""Please enter you OpenAI API key here"""', 'label': '"""Open AI API Key"""', 'render': '(False)'}), "(placeholder='Please enter you OpenAI API key here', label=\n 'Open AI API Key', render=False)\n", (7357, 7453), True, 'import gradio as gr\n'), ((8396, 8409), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (8407, 8409), False, 'from dotenv import load_dotenv\n'), ((1289, 1313), 'transist.llm.ExtractionOutputParser', 'ExtractionOutputParser', ([], {}), '()\n', (1311, 1313), False, 'from transist.llm import create_openai_llm, parse_json_maybe_invalid, ExtractionOutputParser\n'), ((1659, 1687), 'json.loads', 'json.loads', (['section_template'], {}), '(section_template)\n', (1669, 1687), False, 'import json\n'), ((2363, 2396), 'transist.llm.create_openai_llm', 'create_openai_llm', (['openai_api_key'], {}), '(openai_api_key)\n', (2380, 2396), False, 'from transist.llm import create_openai_llm, parse_json_maybe_invalid, ExtractionOutputParser\n'), ((7840, 7913), 'gradio.Textbox', 'gr.Textbox', ([], {'placeholder': '"""Start by telling me about your project"""', 'scale': '(7)'}), "(placeholder='Start by telling me about your project', scale=7)\n", (7850, 7913), True, 'import gradio as gr\n'), ((4984, 5056), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_prompt, draft_question_prompt]'], {}), '([system_prompt, draft_question_prompt])\n', (5016, 5056), False, 'from langchain.prompts.chat import ChatPromptTemplate\n'), ((5178, 5204), 'json.dumps', 'json.dumps', (['facts_document'], {}), '(facts_document)\n', (5188, 5204), False, 'import json\n'), ((5392, 5463), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_prompt, extract_facts_prompt]'], {}), '([system_prompt, extract_facts_prompt])\n', (5424, 5463), False, 'from langchain.prompts.chat import ChatPromptTemplate\n'), ((5679, 5705), 'json.dumps', 'json.dumps', (['facts_document'], {}), '(facts_document)\n', (5689, 5705), False, 'import json\n'), ((2656, 2755), 'gradio.Error', 'gr.Error', (['"""An OpenAI API key needs to be provided in the Additional Inputs section below"""'], {}), "(\n 'An OpenAI API key needs to be provided in the Additional Inputs section below'\n )\n", (2664, 2755), True, 'import gradio as gr\n'), ((3901, 3924), 'random.choice', 'random.choice', (['thinking'], {}), '(thinking)\n', (3914, 3924), False, 'import random\n')] |
# Import Langchain modules
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
# Import Environment Modules
import os
from dotenv import load_dotenv
# Import API Modules
from fastapi import FastAPI
from fastapi.responses import HTMLResponse, JSONResponse
import uvicorn
# Import Other Modules
import json
import logging
import warnings
warnings.filterwarnings("ignore")
# Load configuration
with open('config.json', 'r') as f:
config = json.load(f)
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def environment_setup() -> None:
"""
Load environment variables and set OpenAI API key.
"""
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
def load_documents(document_path: str) -> list:
"""
Load the pdf file and split it into pages.
"""
try:
loader = PyPDFLoader(document_path)
pages = loader.load_and_split()
return pages
except Exception as e:
logging.error(f"Error loading documents from {document_path}: {e}")
return []
def split_documents(pages: list) -> list:
"""
Split the pages into chunks.
"""
try:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=200,
chunk_overlap=0,
length_function=len,
is_separator_regex=True,
)
docs = text_splitter.split_documents(pages)
return docs
except Exception as e:
logging.error(f"Error splitting documents: {e}")
return []
def process_documents() -> list:
"""
Process all documents in the specified path.
"""
document_paths = [os.path.join(config['DOCUMENTS_PATH'], f) for f in os.listdir(config['DOCUMENTS_PATH']) if f.endswith(".pdf")]
all_docs = []
for document_path in document_paths:
pages = load_documents(document_path)
docs = split_documents(pages)
all_docs.extend(docs)
return all_docs
def embeddings(docs: list) -> FAISS:
"""
Load the embeddings and store them in a vector store.
"""
try:
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)
return db
except Exception as e:
logging.error(f"Error creating embeddings: {e}")
return None
def initialize_model() -> OpenAI:
"""
Initialize the model.
"""
llm = OpenAI()
return llm
def LLM_chain(llm: OpenAI, db: FAISS) -> RetrievalQA:
"""
Create a retrieval chain with the LLM and vector store.
"""
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=db.as_retriever(search_kwargs={"k": 5}))
return chain
def initialize_all() -> tuple:
"""
Initialize all components.
"""
environment_setup()
docs = process_documents()
db = embeddings(docs)
llm = initialize_model()
llm_chain = LLM_chain(llm, db)
return llm_chain, db
def process_message(chain: RetrievalQA, user_message: str, db: FAISS) -> str:
"""
Process the user's message and return the bot's response.
"""
try:
query = user_message
docs = db.similarity_search(query)
result = chain.run(input_documents=docs, query=query)
return result
except Exception as e:
logging.error(f"Error generating response: {e}", exc_info=True)
return "Sorry, I couldn't understand your message."
def setup_fastapi(llm_chain: RetrievalQA, db: FAISS) -> FastAPI:
"""
Setup FastAPI with routes.
"""
app = FastAPI()
@app.get("/", response_class=HTMLResponse)
def read_root() -> HTMLResponse:
"""
Serve the chatbot HTML page.
"""
try:
with open('templates/chatbot.html', 'r') as f:
html_content = f.read()
return HTMLResponse(content=html_content, status_code=200)
except Exception as e:
logging.error(f"Error reading HTML file: {e}", exc_info=True)
return HTMLResponse(content="Sorry, something went wrong.", status_code=500)
@app.get("/chatbot/{user_message}")
def get_bot_response(user_message: str) -> JSONResponse:
"""
Process the user's message and return the bot's response.
"""
try:
bot_response = process_message(llm_chain, user_message, db)
return JSONResponse(content={"answer": bot_response})
except Exception as e:
logging.error(f"Error processing message: {e}", exc_info=True)
return JSONResponse(content={"answer": "Sorry, something went wrong."})
return app
if __name__ == "__main__":
try:
llm_chain, db = initialize_all()
fastapi_app = setup_fastapi(llm_chain, db)
uvicorn.run(fastapi_app, host="0.0.0.0", port=8000)
except Exception as e:
logging.error(f"Error during initialization: {e}", exc_info=True) | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.vectorstores.FAISS.from_documents",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.document_loaders.PyPDFLoader"
] | [((573, 606), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (596, 606), False, 'import warnings\n'), ((712, 808), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (731, 808), False, 'import logging\n'), ((678, 690), 'json.load', 'json.load', (['f'], {}), '(f)\n', (687, 690), False, 'import json\n'), ((913, 926), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (924, 926), False, 'from dotenv import load_dotenv\n'), ((962, 989), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (971, 989), False, 'import os\n'), ((2654, 2662), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2660, 2662), False, 'from langchain.llms import OpenAI\n'), ((3800, 3809), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (3807, 3809), False, 'from fastapi import FastAPI\n'), ((1128, 1154), 'langchain.document_loaders.PyPDFLoader', 'PyPDFLoader', (['document_path'], {}), '(document_path)\n', (1139, 1154), False, 'from langchain.document_loaders import PyPDFLoader\n'), ((1462, 1575), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(200)', 'chunk_overlap': '(0)', 'length_function': 'len', 'is_separator_regex': '(True)'}), '(chunk_size=200, chunk_overlap=0,\n length_function=len, is_separator_regex=True)\n', (1492, 1575), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1926, 1967), 'os.path.join', 'os.path.join', (["config['DOCUMENTS_PATH']", 'f'], {}), "(config['DOCUMENTS_PATH'], f)\n", (1938, 1967), False, 'import os\n'), ((2374, 2392), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2390, 2392), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2406, 2444), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2426, 2444), False, 'from langchain.vectorstores import FAISS\n'), ((5019, 5070), 'uvicorn.run', 'uvicorn.run', (['fastapi_app'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(fastapi_app, host='0.0.0.0', port=8000)\n", (5030, 5070), False, 'import uvicorn\n'), ((1251, 1318), 'logging.error', 'logging.error', (['f"""Error loading documents from {document_path}: {e}"""'], {}), "(f'Error loading documents from {document_path}: {e}')\n", (1264, 1318), False, 'import logging\n'), ((1738, 1786), 'logging.error', 'logging.error', (['f"""Error splitting documents: {e}"""'], {}), "(f'Error splitting documents: {e}')\n", (1751, 1786), False, 'import logging\n'), ((1977, 2013), 'os.listdir', 'os.listdir', (["config['DOCUMENTS_PATH']"], {}), "(config['DOCUMENTS_PATH'])\n", (1987, 2013), False, 'import os\n'), ((2498, 2546), 'logging.error', 'logging.error', (['f"""Error creating embeddings: {e}"""'], {}), "(f'Error creating embeddings: {e}')\n", (2511, 2546), False, 'import logging\n'), ((3552, 3615), 'logging.error', 'logging.error', (['f"""Error generating response: {e}"""'], {'exc_info': '(True)'}), "(f'Error generating response: {e}', exc_info=True)\n", (3565, 3615), False, 'import logging\n'), ((4087, 4138), 'fastapi.responses.HTMLResponse', 'HTMLResponse', ([], {'content': 'html_content', 'status_code': '(200)'}), '(content=html_content, status_code=200)\n', (4099, 4138), False, 'from fastapi.responses import HTMLResponse, 
JSONResponse\n'), ((4629, 4675), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'answer': bot_response}"}), "(content={'answer': bot_response})\n", (4641, 4675), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n'), ((5106, 5171), 'logging.error', 'logging.error', (['f"""Error during initialization: {e}"""'], {'exc_info': '(True)'}), "(f'Error during initialization: {e}', exc_info=True)\n", (5119, 5171), False, 'import logging\n'), ((4182, 4243), 'logging.error', 'logging.error', (['f"""Error reading HTML file: {e}"""'], {'exc_info': '(True)'}), "(f'Error reading HTML file: {e}', exc_info=True)\n", (4195, 4243), False, 'import logging\n'), ((4263, 4332), 'fastapi.responses.HTMLResponse', 'HTMLResponse', ([], {'content': '"""Sorry, something went wrong."""', 'status_code': '(500)'}), "(content='Sorry, something went wrong.', status_code=500)\n", (4275, 4332), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n'), ((4719, 4781), 'logging.error', 'logging.error', (['f"""Error processing message: {e}"""'], {'exc_info': '(True)'}), "(f'Error processing message: {e}', exc_info=True)\n", (4732, 4781), False, 'import logging\n'), ((4801, 4865), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': "{'answer': 'Sorry, something went wrong.'}"}), "(content={'answer': 'Sorry, something went wrong.'})\n", (4813, 4865), False, 'from fastapi.responses import HTMLResponse, JSONResponse\n')] |
import langchain
import re
from typing import TypeVar, Optional
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from mdutils.mdutils import MdUtils
from openai import ChatCompletion
## you can use typing.Self after python 3.11
Self = TypeVar("Self")
def set_up() -> None:
load_dotenv()
langchain.verbose = True
return
def get_gpt_response(query: str) -> str:
response = ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": query}]
)
return response["choices"][0]["message"]["content"].strip()
"""
def time_measurement(func: Callable, val: Any) -> Any:
start = time.time()
response = func(**val)
elapsed_time = time.time() - start
return response, elapsed_time
"""
def create_llm(llm_name: str) -> ChatOpenAI:
return ChatOpenAI(temperature=0, model_name=llm_name)
def create_CBmemory() -> ConversationBufferMemory:
return ConversationBufferMemory(
return_messages=True, memory_key="chat_history", output_key="output"
)
def sep_md(mdFile: MdUtils) -> None:
mdFile.new_line()
mdFile.new_line("---")
mdFile.new_line()
def host_validation(host: Optional[str]):
    # Return True if host is a string
    # TODO: the content of the string should also be taken into account
if not host:
return False
elif isinstance(host, str):
return True
def port_validation(port: Optional[str]):
    # Return True if port is a string of ASCII digits,
    # otherwise False
if not port:
return False
return True if re.fullmatch("[0-9]+", port) else False
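

if __name__ == "__main__":
    # Minimal illustrative usage (not part of the original module); it assumes
    # a valid OPENAI_API_KEY is available via the .env file loaded by set_up().
    set_up()
    print(host_validation("localhost"))  # True
    print(port_validation("8080"))       # True
    llm = create_llm("gpt-3.5-turbo")
    memory = create_CBmemory()
    print(llm.model_name, memory.memory_key)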
| [
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI"
] | [((319, 334), 'typing.TypeVar', 'TypeVar', (['"""Self"""'], {}), "('Self')\n", (326, 334), False, 'from typing import TypeVar, Optional\n'), ((363, 376), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (374, 376), False, 'from dotenv import load_dotenv\n'), ((475, 570), 'openai.ChatCompletion.create', 'ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': "[{'role': 'user', 'content': query}]"}), "(model='gpt-3.5-turbo', messages=[{'role': 'user',\n 'content': query}])\n", (496, 570), False, 'from openai import ChatCompletion\n'), ((892, 938), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': 'llm_name'}), '(temperature=0, model_name=llm_name)\n', (902, 938), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1003, 1101), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(return_messages=True, memory_key='chat_history',\n output_key='output')\n", (1027, 1101), False, 'from langchain.memory import ConversationBufferMemory\n'), ((1549, 1577), 're.fullmatch', 're.fullmatch', (['"""[0-9]+"""', 'port'], {}), "('[0-9]+', port)\n", (1561, 1577), False, 'import re\n')] |
import os
from datasets import get_dataset
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.callbacks import get_openai_callback
from utils.timer import Timer
import logging
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from config import api_key, load_config
import wandb
import langchain
langchain.verbose = True
# If you don't want your script to sync to the cloud
os.environ["WANDB_MODE"] = "offline"
CLASSIFIY_PROMPT = """
You are a text-to-SQL expert able to identify poorly formulated questions in natural language.
The dataset used is consisting of questions and their corresponding golden SQL queries. You will be given the database schema of the database corresponding to the question.
Furthermore, you will also be given a hint that provides additional information that is needed to correctly convert the question and interpret the database schema.
However, some of the questions in the data are poorly formulated or contain errors.
Below is a classification scheme for the questions that are to be converted into SQL queries.
0 = Correct question. May still contain minor errors in language or minor ambiguities that do not affect the interpretation and generation of the SQL query
1 = Is unclear, ambiguous, unspecific or contain grammatical errors that surely is going to affect the interpretation and generation of the SQL query. The question
is unspecific in which columns that are to be returned. The question is not asking for a specific column, but asks generally about a table in the database.
2 = The question contains minor errors in language or minor ambiguities that might affect the interpretation and generation of the SQL query.
3 = The question is wrongly formulated when considering the structure of the database schema. The information that the question is asking for is not possible to accurately retrieve from the database.
Here are some examples of questions that would be classified with 1 and an explanation of why:
Example 1: List the customer who made the transaction id 3682978
Explanation: The question is unspecific in which columns that are to be returned. It asks to list the customers, but does not specify which columns that are to be returned from the client table.
Example 2: Which district has the largest amount of female clients?
Explanation: The question is unspecific in which columns that are to be returned. It asks "which district", but does not specify which columns that are to be returned from the district table.
Example 3: What is the average amount of transactions done in the year of 1998 ?
Explanation: Is unclear, ambiguous, unspecific or contain grammatical errors that surely is going to affect the interpretation and generation of the SQL query.
Here is an example of a question that would be classified with 2 and an explanation of why:
Example 1: What are the top 5 loans by region names for the month of Mars 1997?
Explanation: The statement 'top 5' could be ambiguous. It could mean the top 5 loans by amount or the top 5 loans by number of loans.
Here are some examples of questions that would be classified with 3 and an explanation of why:
Example 1: What is the disposition id of the oldest client in the Prague region?
Explanation: The question is wrongly formulated when considering the structure of the database schema. There can be multiple disposition ids for a client,
since a client can have multiple accounts. The question is not asking for a specific disposition id, but asks generally about a client.
Here are some examples of questions that would be classified with 0 and an explanation of why:
Example 1: List the id of the customer who made the transaction id : 3682978
Explanation: Clear and correct question.
Example 2: What is the name of the district that has the largest amount of female clients?
Explanation: Specific and correct question.
Example 3: What is the disposition id(s) of the oldest client in the Prague region?
Explanation: The question is open for disposition ids which is correct when considering the sql-schema.
Example 4: What was the average number of withdrawal transactions conducted by female clients from the Prague region during the year 1998?
Explanation: Clear and correct question.
Database schema:
{database_schema}
Hint:
{evidence}
Below you will be provided with the correct SQL-query that represents what the questions is trying to ask for.
Gold query:
{gold_query}
Please classify the question below according to the classification scheme above, the examples, the hint and the SQL gold query provided.
Also please assume that all dates, values, names and numbers in the questions are correct.
Question:
{question}
In your answer DO NOT return anything else than the mark as a sole number. Do not return any corresponding text or explanations.
"""
#1 = Gray area, minor errors that may or may not affect the interpretation and generation of the SQL query.
class Classifier():
total_tokens = 0
prompt_tokens = 0
total_cost = 0
completion_tokens = 0
last_call_execution_time = 0
total_call_execution_time = 0
def __init__(self, llm):
self.llm = llm
self.prompt_template = CLASSIFIY_PROMPT
prompt = PromptTemplate(
# input_variables=["question", "database_schema","evidence"],
input_variables=["question", "database_schema", "evidence", 'gold_query'],
template=CLASSIFIY_PROMPT,
)
self.chain = LLMChain(llm=llm, prompt=prompt)
def classify_question(self, question, schema, evidence, gold_query):
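        """
        Run the classification chain for one question while tracking token
        usage, cost and OpenAI API execution time on the classifier instance.
        """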
with get_openai_callback() as cb:
with Timer() as t:
response = self.chain.run({
'question': question,
'database_schema': schema,
'evidence': evidence,
'gold_query': gold_query
})
logging.info(f"OpenAI API execution time: {t.elapsed_time:.2f}")
self.last_call_execution_time = t.elapsed_time
self.total_call_execution_time += t.elapsed_time
self.total_tokens += cb.total_tokens
self.prompt_tokens += cb.prompt_tokens
self.total_cost += cb.total_cost
self.completion_tokens += cb.completion_tokens
return response
accepted_faults = [1, 3]
def main():
config = load_config("classifier_config.yaml")
wandb.init(
project=config.project,
config=config,
name=config.current_experiment,
entity=config.entity
)
artifact = wandb.Artifact('experiment_results', type='dataset')
    table = wandb.Table(columns=["Question", "Classified_quality", "Difficulty"]) ## Is there anything else we want to include here?
wandb_cm = wandb.Table(columns=['0', '1', '2', '3'])
metrics_table = wandb.Table(columns=["Class", "Precision", "Recall", "F1 Score", "Accuracy"])
weighted_avg_table = wandb.Table(columns=["Metric", "Weighted Average"])
# "Weighted Averages", weighted_averages['precision'], weighted_averages['recall'], weighted_averages['f1'], weighted_averages['accuracy']
llm = ChatOpenAI(
openai_api_key=api_key,
model_name=config.llm_settings.model,
temperature=config.llm_settings.temperature,
request_timeout=config.llm_settings.request_timeout
)
dataset = get_dataset("BIRDCorrectedFinancialGoldAnnotated")
classifier = Classifier(llm)
wandb.config['prompt'] = classifier.prompt_template
no_data_points = dataset.get_number_of_data_points()
tp = 0
fp = 0
tn = 0
fn = 0
confusion_matrix = np.zeros((4,4))
annotation_counts = {0: 0, 1: 0, 2: 0, 3: 0}
for i in range(no_data_points):
data_point = dataset.get_data_point(i)
evidence = data_point['evidence']
db_id = data_point['db_id']
question = data_point['question']
gold_query = data_point['SQL']
difficulty = data_point['difficulty'] if 'difficulty' in data_point else ""
annotated_question_quality = data_point["annotation"]
sql_schema = dataset.get_schema_and_sample_data(db_id)
classified_quality = classifier.classify_question(question, sql_schema, evidence, gold_query)
classified_quality = int(classified_quality) if classified_quality.isdigit() else None
print('classified_quality: ',classified_quality)
if classified_quality is not None:
for annotated_quality in annotated_question_quality:
annotation_counts[annotated_quality] +=1
confusion_matrix[annotated_quality][classified_quality] += 1
print('confusion matrix:')
print(confusion_matrix)
# Converting to integer
confusion_matrix = np.array(confusion_matrix).astype(int)
print('annotation counts: ',annotation_counts)
labels = [0, 1, 2, 3]
sns.heatmap(confusion_matrix, annot=True, fmt="d", cmap="YlOrRd", xticklabels=labels, yticklabels=labels)
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig(f'{config.current_experiment}_heatmap.png')
wandb.log({"confusion_matrix_heatmap": wandb.Image(f'{config.current_experiment}_heatmap.png')})
metrics = {'precision': 0, 'recall': 0, 'f1': 0, 'accuracy': 0}
weighted_sums = {'precision': 0, 'recall': 0, 'f1': 0, 'accuracy': 0}
total_instances = np.sum(confusion_matrix)
for i in range(4):
row_data = confusion_matrix[i].tolist()
print('row_data: ', row_data)
wandb_cm.add_data(*row_data)
tp = confusion_matrix[i][i]
fp = sum(confusion_matrix[:, i]) - tp
fn = sum(confusion_matrix[i, :]) - tp
tn = np.sum(confusion_matrix) - (tp + fp + fn)
precision = tp / (tp + fp) if (tp + fp) != 0 else 0
recall = tp / (tp + fn) if (tp + fn) != 0 else 0
f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0
accuracy = (tp + tn) / (tp + tn + fp + fn)
metrics[i] = {'precision': precision, 'recall': recall, 'f1': f1, 'accuracy': accuracy}
metrics_table.add_data(i, metrics[i]['precision'], metrics[i]['recall'], metrics[i]['f1'], metrics[i]['accuracy'])
class_weight = sum(confusion_matrix[i, :])
weighted_sums['precision'] += precision * class_weight
weighted_sums['recall'] += recall * class_weight
weighted_sums['f1'] += f1 * class_weight
weighted_sums['accuracy'] += accuracy * class_weight
print('metrics for class ', i, ': ', metrics[i])
# metrics now contains the precision, recall, and F1-score for each category
# annotated_question_qualities = set(annotated_question_quality)
# if classified_quality.isdigit() and int(classified_quality) == 1:
# if any(element in annotated_question_qualities for element in accepted_faults):
# tp += 1
# else:
# fp += 1
# elif classified_quality.isdigit() and int(classified_quality) == 0:
# if any(element in annotated_question_qualities for element in accepted_faults):
# fn += 1
# else:
# tn += 1
# precision = tp / (tp + fp)
# recall = tp / (tp + fn)
# f1 = 2 * ((precision * recall) / (precision + recall))
# accuracy = (tp + tn) / (tp + tn + fp + fn)
table.add_data(question, classified_quality, difficulty)
wandb.log({
"total_tokens": classifier.total_tokens,
"prompt_tokens": classifier.prompt_tokens,
"completion_tokens": classifier.completion_tokens,
"total_cost": classifier.total_cost,
"openAPI_call_execution_time": classifier.last_call_execution_time,
}, step=i+1)
print("Predicted quality: ", classified_quality, " Annotated quality: ", " ".join(map(str, annotated_question_quality)))
weighted_averages = {metric: total / total_instances for metric, total in weighted_sums.items()}
print("Weighted Averages:", weighted_averages)
# weighted_avg_table.add_data("Weighted Averages", weighted_averages['precision'], weighted_averages['recall'], weighted_averages['f1'], weighted_averages['accuracy'])
weighted_avg_table.add_data("Precision", weighted_averages['precision'])
weighted_avg_table.add_data("Recall", weighted_averages['recall'])
weighted_avg_table.add_data("F1 Score", weighted_averages['f1'])
weighted_avg_table.add_data("Accuracy", weighted_averages['accuracy'])
wandb.run.summary["total_tokens"] = classifier.total_tokens
wandb.run.summary["prompt_tokens"] = classifier.prompt_tokens
wandb.run.summary["completion_tokens"] = classifier.completion_tokens
wandb.run.summary["total_cost"] = classifier.total_cost
wandb.run.summary['total_predicted_execution_time'] = dataset.total_predicted_execution_time
wandb.run.summary['total_openAPI_execution_time'] = classifier.total_call_execution_time
artifact.add(wandb_cm, "ConfusionMatrix_predictions")
artifact.add(table, "query_results")
artifact.add(metrics_table, "metrics")
artifact.add(weighted_avg_table, "weighted_averages_metric_table")
wandb.log_artifact(artifact)
artifact_code = wandb.Artifact('code', type='code')
artifact_code.add_file("src/run_classifier.py")
wandb.log_artifact(artifact_code)
wandb.finish()
if __name__ == "__main__":
main() | [
"langchain.prompts.PromptTemplate",
"langchain.callbacks.get_openai_callback",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.LLMChain"
] | [((6515, 6552), 'config.load_config', 'load_config', (['"""classifier_config.yaml"""'], {}), "('classifier_config.yaml')\n", (6526, 6552), False, 'from config import api_key, load_config\n'), ((6558, 6666), 'wandb.init', 'wandb.init', ([], {'project': 'config.project', 'config': 'config', 'name': 'config.current_experiment', 'entity': 'config.entity'}), '(project=config.project, config=config, name=config.\n current_experiment, entity=config.entity)\n', (6568, 6666), False, 'import wandb\n'), ((6716, 6768), 'wandb.Artifact', 'wandb.Artifact', (['"""experiment_results"""'], {'type': '"""dataset"""'}), "('experiment_results', type='dataset')\n", (6730, 6768), False, 'import wandb\n'), ((6781, 6850), 'wandb.Table', 'wandb.Table', ([], {'columns': "['Question', 'Classified_quality', 'Difficulty']"}), "(columns=['Question', 'Classified_quality', 'Difficulty'])\n", (6792, 6850), False, 'import wandb\n'), ((6906, 6947), 'wandb.Table', 'wandb.Table', ([], {'columns': "['0', '1', '2', '3']"}), "(columns=['0', '1', '2', '3'])\n", (6917, 6947), False, 'import wandb\n'), ((6968, 7045), 'wandb.Table', 'wandb.Table', ([], {'columns': "['Class', 'Precision', 'Recall', 'F1 Score', 'Accuracy']"}), "(columns=['Class', 'Precision', 'Recall', 'F1 Score', 'Accuracy'])\n", (6979, 7045), False, 'import wandb\n'), ((7071, 7122), 'wandb.Table', 'wandb.Table', ([], {'columns': "['Metric', 'Weighted Average']"}), "(columns=['Metric', 'Weighted Average'])\n", (7082, 7122), False, 'import wandb\n'), ((7278, 7457), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'config.llm_settings.model', 'temperature': 'config.llm_settings.temperature', 'request_timeout': 'config.llm_settings.request_timeout'}), '(openai_api_key=api_key, model_name=config.llm_settings.model,\n temperature=config.llm_settings.temperature, request_timeout=config.\n llm_settings.request_timeout)\n', (7288, 7457), False, 'from langchain.chat_models import ChatOpenAI\n'), ((7503, 7553), 'datasets.get_dataset', 'get_dataset', (['"""BIRDCorrectedFinancialGoldAnnotated"""'], {}), "('BIRDCorrectedFinancialGoldAnnotated')\n", (7514, 7553), False, 'from datasets import get_dataset\n'), ((7770, 7786), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (7778, 7786), True, 'import numpy as np\n'), ((9068, 9177), 'seaborn.heatmap', 'sns.heatmap', (['confusion_matrix'], {'annot': '(True)', 'fmt': '"""d"""', 'cmap': '"""YlOrRd"""', 'xticklabels': 'labels', 'yticklabels': 'labels'}), "(confusion_matrix, annot=True, fmt='d', cmap='YlOrRd',\n xticklabels=labels, yticklabels=labels)\n", (9079, 9177), True, 'import seaborn as sns\n'), ((9178, 9202), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (9188, 9202), True, 'import matplotlib.pyplot as plt\n'), ((9207, 9236), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (9217, 9236), True, 'import matplotlib.pyplot as plt\n'), ((9246, 9301), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{config.current_experiment}_heatmap.png"""'], {}), "(f'{config.current_experiment}_heatmap.png')\n", (9257, 9301), True, 'import matplotlib.pyplot as plt\n'), ((9569, 9593), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (9575, 9593), True, 'import numpy as np\n'), ((13640, 13668), 'wandb.log_artifact', 'wandb.log_artifact', (['artifact'], {}), '(artifact)\n', (13658, 13668), False, 'import wandb\n'), ((13690, 13725), 'wandb.Artifact', 'wandb.Artifact', 
(['"""code"""'], {'type': '"""code"""'}), "('code', type='code')\n", (13704, 13725), False, 'import wandb\n'), ((13782, 13815), 'wandb.log_artifact', 'wandb.log_artifact', (['artifact_code'], {}), '(artifact_code)\n', (13800, 13815), False, 'import wandb\n'), ((13821, 13835), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (13833, 13835), False, 'import wandb\n'), ((5346, 5466), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question', 'database_schema', 'evidence', 'gold_query']", 'template': 'CLASSIFIY_PROMPT'}), "(input_variables=['question', 'database_schema', 'evidence',\n 'gold_query'], template=CLASSIFIY_PROMPT)\n", (5360, 5466), False, 'from langchain.prompts import PromptTemplate\n'), ((5598, 5630), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (5606, 5630), False, 'from langchain.chains import LLMChain\n'), ((11746, 12026), 'wandb.log', 'wandb.log', (["{'total_tokens': classifier.total_tokens, 'prompt_tokens': classifier.\n prompt_tokens, 'completion_tokens': classifier.completion_tokens,\n 'total_cost': classifier.total_cost, 'openAPI_call_execution_time':\n classifier.last_call_execution_time}"], {'step': '(i + 1)'}), "({'total_tokens': classifier.total_tokens, 'prompt_tokens':\n classifier.prompt_tokens, 'completion_tokens': classifier.\n completion_tokens, 'total_cost': classifier.total_cost,\n 'openAPI_call_execution_time': classifier.last_call_execution_time},\n step=i + 1)\n", (11755, 12026), False, 'import wandb\n'), ((5719, 5740), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (5738, 5740), False, 'from langchain.callbacks import get_openai_callback\n'), ((6031, 6095), 'logging.info', 'logging.info', (['f"""OpenAI API execution time: {t.elapsed_time:.2f}"""'], {}), "(f'OpenAI API execution time: {t.elapsed_time:.2f}')\n", (6043, 6095), False, 'import logging\n'), ((8942, 8968), 'numpy.array', 'np.array', (['confusion_matrix'], {}), '(confusion_matrix)\n', (8950, 8968), True, 'import numpy as np\n'), ((9346, 9401), 'wandb.Image', 'wandb.Image', (['f"""{config.current_experiment}_heatmap.png"""'], {}), "(f'{config.current_experiment}_heatmap.png')\n", (9357, 9401), False, 'import wandb\n'), ((9882, 9906), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (9888, 9906), True, 'import numpy as np\n'), ((5765, 5772), 'utils.timer.Timer', 'Timer', ([], {}), '()\n', (5770, 5772), False, 'from utils.timer import Timer\n')] |