code | apis | extract_api
---|---|---|
import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
from langchain.tools.gmail.utils import clean_email_body
class Resource(str, Enum):
"""Enumerator of Resources to search."""
THREADS = "threads"
MESSAGES = "messages"
class SearchArgsSchema(BaseModel):
"""Input for SearchGmailTool."""
# From https://support.google.com/mail/answer/7190?hl=en
query: str = Field(
...,
description="The Gmail query. Example filters include from:sender,"
" to:recipient, subject:subject, -filtered_term,"
" in:folder, is:important|read|starred, after:year/mo/date, "
"before:year/mo/date, label:label_name"
' "exact phrase".'
" Search newer/older than using d (day), m (month), and y (year): "
"newer_than:2d, older_than:1y."
" Attachments with extension example: filename:pdf. Multiple term"
" matching example: from:amy OR from:david.",
)
resource: Resource = Field(
default=Resource.MESSAGES,
description="Whether to search for threads or messages.",
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
class GmailSearch(GmailBaseTool):
"""Tool that searches for messages or threads in Gmail."""
name: str = "search_gmail"
description: str = (
"Use this tool to search for email messages or threads."
" The input must be a valid Gmail query."
" The output is a JSON list of the requested resource."
)
args_schema: Type[SearchArgsSchema] = SearchArgsSchema
def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Add the thread message snippets to the thread results
results = []
for thread in threads:
thread_id = thread["id"]
thread_data = (
self.api_resource.users()
.threads()
.get(userId="me", id=thread_id)
.execute()
)
messages = thread_data["messages"]
thread["messages"] = []
for message in messages:
snippet = message["snippet"]
thread["messages"].append({"snippet": snippet, "id": message["id"]})
results.append(thread)
return results
def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
results = []
for message in messages:
message_id = message["id"]
message_data = (
self.api_resource.users()
.messages()
.get(userId="me", format="raw", id=message_id)
.execute()
)
raw_message = base64.urlsafe_b64decode(message_data["raw"])
email_msg = email.message_from_bytes(raw_message)
subject = email_msg["Subject"]
sender = email_msg["From"]
message_body = email_msg.get_payload()
body = clean_email_body(message_body)
results.append(
{
"id": message["id"],
"threadId": message_data["threadId"],
"snippet": message_data["snippet"],
"body": body,
"subject": subject,
"sender": sender,
}
)
return results
def _run(
self,
query: str,
resource: Resource = Resource.MESSAGES,
max_results: int = 10,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
"""Run the tool."""
results = (
self.api_resource.users()
.messages()
.list(userId="me", q=query, maxResults=max_results)
.execute()
.get(resource.value, [])
)
if resource == Resource.THREADS:
return self._parse_threads(results)
elif resource == Resource.MESSAGES:
return self._parse_messages(results)
else:
raise NotImplementedError(f"Resource of type {resource} not implemented.")
| [
"langchain.tools.gmail.utils.clean_email_body",
"langchain.pydantic_v1.Field"
] | [((606, 1054), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david."""'}), '(..., description=\n \'The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david.\'\n )\n', (611, 1054), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1181, 1276), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'Resource.MESSAGES', 'description': '"""Whether to search for threads or messages."""'}), "(default=Resource.MESSAGES, description=\n 'Whether to search for threads or messages.')\n", (1186, 1276), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1318, 1391), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of results to return."""'}), "(default=10, description='The maximum number of results to return.')\n", (1323, 1391), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((2960, 3005), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (["message_data['raw']"], {}), "(message_data['raw'])\n", (2984, 3005), False, 'import base64\n'), ((3031, 3068), 'email.message_from_bytes', 'email.message_from_bytes', (['raw_message'], {}), '(raw_message)\n', (3055, 3068), False, 'import email\n'), ((3224, 3254), 'langchain.tools.gmail.utils.clean_email_body', 'clean_email_body', (['message_body'], {}), '(message_body)\n', (3240, 3254), False, 'from langchain.tools.gmail.utils import clean_email_body\n')] |
from langchain import PromptTemplate
from codedog.templates import grimoire_en
TRANSLATE_PROMPT = PromptTemplate(
template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=["language", "description", "content"]
)
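# A hedged usage sketch (not part of the original module): the template is
# rendered by filling its three input variables; the values below are
# hypothetical placeholders.
rendered = TRANSLATE_PROMPT.format(
    language="Chinese",
    description="Summary of the pull request under review",
    content="The change refactors the review formatting helpers.",
)
print(rendered)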
| [
"langchain.PromptTemplate"
] | [((100, 217), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'grimoire_en.TRANSLATE_PR_REVIEW', 'input_variables': "['language', 'description', 'content']"}), "(template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=[\n 'language', 'description', 'content'])\n", (114, 217), False, 'from langchain import PromptTemplate\n')] |
# Importing necessary library
import streamlit as st
# Setting up the page configuration
st.set_page_config(
page_title="QuickDigest AI",
page_icon=":brain:",
layout="wide",
initial_sidebar_state="expanded"
)
# Defining the function to display the home page
def home():
import streamlit as st
from streamlit_extras.badges import badge
from streamlit_extras.colored_header import colored_header
from streamlit_extras.let_it_rain import rain
# Displaying a rain animation with specified parameters
rain(
emoji="🎈",
font_size=54,
falling_speed=5,
animation_length="1",
)
# Displaying a colored header with specified parameters
colored_header(
label="QuickDigest AI🧠, Your Intelligent Data Companion",
description="~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop",
color_name="violet-70",
)
# Displaying information and warnings in the sidebar
st.sidebar.info(
"Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen."
)
st.sidebar.info(
"For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq)."
)
st.sidebar.warning(
"LLMs may produce inaccurate information about people, places, or facts. Don't entirely trust them."
)
# Displaying markdown text on the page
st.markdown(
"<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>",
unsafe_allow_html=True
)
st.markdown(
"**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform's cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files."
)
st.markdown(
"**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience."
)
st.markdown(
"**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI's real-time web search feature ensures you're always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time."
)
st.markdown(
"**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes."
)
st.markdown("---")
# Displaying a support section with badges and link button
st.markdown("<h5>Support Us</h5>", unsafe_allow_html=True)
col1, col2, col3, col4 = st.columns(4)
with col1:
st.write("Star this repository on Github")
badge(type="github", name="codingis4noobs2/QuickDigest")
with col2:
st.write("Follow me on twitter")
badge(type="twitter", name="4gameparth")
with col3:
st.write("Buy me a coffee")
badge(type="buymeacoffee", name="codingis4noobs2")
with col4:
st.link_button("Upvote on Replit", "https://replit.com/@ParthShah38/QuickDigestAI?v=1")
# Function to display chat with files page
def chat_with_files():
import os
import streamlit as st
from streamlit_extras.badges import badge
from streamlit_extras.colored_header import colored_header
from llama_index import (
OpenAIEmbedding,
ServiceContext,
set_global_service_context,
)
from llama_index.llms import OpenAI
from llama_index.chat_engine.types import StreamingAgentChatResponse
from llama_index import SimpleDirectoryReader, VectorStoreIndex
import assemblyai as aai
from PyPDF2 import PdfReader
from docx import Document
# Cache the result to avoid recomputation
@st.cache_resource(show_spinner="Indexing documents...Please have patience")
def build_index(files):
documents = SimpleDirectoryReader(input_files=files).load_data()
index = VectorStoreIndex.from_documents(documents)
return index
# Handle streaming responses
def handle_stream(root, stream: StreamingAgentChatResponse):
text = ""
root.markdown("Thinking...")
for token in stream.response_gen:
text += token
root.markdown(text)
return text
# Define constants and settings
CACHE_DIR = "./uploads"
aai.settings.api_key = st.secrets['assembly_api_key']
# Render chat messages
def render_message(message):
with st.chat_message(message["role"]):
st.write(message["text"])
# Transcribe audio and video files
def transcribe_audio_video(file_path):
transcriber = aai.Transcriber()
transcript = transcriber.transcribe(file_path)
transcript_path = file_path + ".txt"
with open(transcript_path, "w") as f:
f.write(transcript.text)
return transcript_path
# Upload files and cache them
def upload_files(types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx'], **kwargs):
files = st.file_uploader(
label=f"Upload files", type=types, **kwargs
)
if not files:
st.info(f"Please add documents, Note: Scanned documents are not supported yet!")
st.stop()
return cache_files(files, types=types)
# Cache uploaded files
def cache_files(files, types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx']) -> list[str]:
filepaths = []
for file in files:
# Determine the file extension from the mime type
ext = file.type.split("/")[-1]
if ext == "plain": # Handle text/plain mime type
ext = "txt"
elif ext in ["vnd.openxmlformats-officedocument.wordprocessingml.document", "vnd.ms-word"]:
ext = "docx" # or "doc" depending on your needs
if ext not in types:
continue
filepath = f"{CACHE_DIR}/{file.name}"
with open(filepath, "wb") as f:
f.write(file.getvalue())
if ext in ["mp3", "mp4"]:
filepath = transcribe_audio_video(filepath)
filepaths.append(filepath)
# st.sidebar.write("Uploaded files", filepaths) # Debug statement
with st.sidebar:
with st.expander("Uploaded Files"):
filepaths_pretty = "\n".join(f"- {filepath}" for filepath in filepaths)
st.markdown(f"{filepaths_pretty}")
return filepaths
def transcribe_and_save(file_path):
transcriber = aai.Transcriber()
transcript = transcriber.transcribe(file_path)
transcript_path = file_path + ".txt"
with open(transcript_path, "w") as f:
f.write(transcript.text)
return transcript_path
# Save extracted text to a txt file
def save_extracted_text_to_txt(text, filename):
txt_filename = os.path.splitext(filename)[0] + ".txt"
txt_filepath = os.path.join('uploads', txt_filename)
with open(txt_filepath, 'w', encoding='utf-8') as txt_file:
txt_file.write(text)
return txt_filepath
# Get OpenAI API key from session state
def get_key():
return st.session_state["openai_api_key"]
# Read text from Word document
def read_word_file(file_path):
doc = Document(file_path)
full_text = []
for para in doc.paragraphs:
full_text.append(para.text)
return '\n'.join(full_text)
# Process uploaded documents
def process_documents(documents):
processed_docs = []
for doc in documents:
if doc.endswith('.pdf'):
processed_docs.append(process_pdf(doc))
elif doc.endswith(('.doc', '.docx')):
text = read_word_file(doc)
txt_filepath = save_extracted_text_to_txt(text, os.path.basename(doc))
processed_docs.append(txt_filepath)
elif doc.endswith(('.mp3', '.mp4', '.mpeg')):
processed_docs.append(transcribe_and_save(doc))
else:
processed_docs.append(doc)
return processed_docs
# Process PDF files
def process_pdf(pdf_path):
reader = PdfReader(pdf_path)
all_text = ""
for page in reader.pages:
extracted_text = page.extract_text()
if extracted_text:
processed_text = ' '.join(extracted_text.split('\n'))
all_text += processed_text + "\n\n"
txt_filepath = save_extracted_text_to_txt(all_text, os.path.basename(pdf_path))
os.remove(pdf_path) # Delete the original PDF file
return txt_filepath
# Main logic for handling OpenAI API key and document processing
if "openai_api_key" not in st.session_state:
openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
if not openai_api_key:
st.sidebar.warning("Please add your OpenAI API key to continue!!")
st.warning("Please add your OpenAI API key to continue!!")
st.sidebar.info("To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you're unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). While obtaining the API key doesn't require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services.")
st.stop()
st.session_state["openai_api_key"] = openai_api_key
st.sidebar.text_input("Enter Youtube Video ID(Coming soon)", disabled=True)
st.sidebar.text_input("Enter Spotify Podast link(Coming soon)", disabled=True)
openai_api_key = get_key()
if openai_api_key:
st.toast('OpenAI API Key Added ✅')
# Define service-context
with st.sidebar:
with st.expander("Advanced Settings"):
st.session_state['temperature'] = st.number_input("Enter Temperature", help="It determines how creative the model should be", min_value=0.0,max_value=1.0, value=0.1)
llm = OpenAI(temperature=st.session_state['temperature'], model='gpt-3.5-turbo', api_key=openai_api_key)
embed_model = OpenAIEmbedding(api_key=openai_api_key)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
set_global_service_context(service_context)
# Upload PDFs, DOCs, TXTs, MP3s, and MP4s
documents = upload_files(types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx'], accept_multiple_files=True)
# Process the uploaded documents
processed_documents = process_documents(documents)
if not processed_documents:
st.warning("No documents uploaded!")
st.stop()
index = build_index(processed_documents)
query_engine = index.as_chat_engine(chat_mode="condense_question", streaming=True)
messages = st.session_state.get("messages", [])
if not messages:
messages.append({"role": "assistant", "text": "Hi!"})
for message in messages:
render_message(message)
if user_query := st.chat_input():
message = {"role": "user", "text": user_query}
messages.append(message)
render_message(message)
with st.chat_message("assistant"):
stream = query_engine.stream_chat(user_query)
text = handle_stream(st.empty(), stream)
message = {"role": "assistant", "text": text}
messages.append(message)
st.session_state.messages = messages
# Function to use LLMs with web search
def use_llms_with_web():
from langchain.agents import ConversationalChatAgent, AgentExecutor
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.tools import DuckDuckGoSearchRun
import streamlit as st
st.title("Use web search with LLMs")
# Taking OpenAI API key input from the user
openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
# Initializing message history and memory
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
)
# Resetting chat history logic
if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"):
msgs.clear()
msgs.add_ai_message("How can I help you?")
st.session_state.steps = {}
# Defining avatars for chat messages
avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
with st.chat_message(avatars[msg.type]):
# Render intermediate steps if any were saved
for step in st.session_state.steps.get(str(idx), []):
if step[0].tool == "_Exception":
continue
with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"):
st.write(step[0].log)
st.write(step[1])
st.write(msg.content)
# Taking new input from the user
if prompt := st.chat_input(placeholder="Who won the 2022 Cricket World Cup?"):
st.chat_message("user").write(prompt)
# Checking if OpenAI API key is provided
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
# Initializing LLM and tools for web search
llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True)
tools = [DuckDuckGoSearchRun(name="Search")]
chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools)
executor = AgentExecutor.from_agent_and_tools(
agent=chat_agent,
tools=tools,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = executor(prompt, callbacks=[st_cb])
st.write(response["output"])
st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]
# Function to display chat with dataset page
def chat_with_dataset():
from langchain.agents import AgentType
from langchain.agents import create_pandas_dataframe_agent
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
import streamlit as st
import pandas as pd
import os
file_formats = {
"csv": pd.read_csv,
"xls": pd.read_excel,
"xlsx": pd.read_excel,
"xlsm": pd.read_excel,
"xlsb": pd.read_excel,
}
def clear_submit():
"""
Clear the Submit Button State
Returns:
"""
st.session_state["submit"] = False
@st.cache_data()
def load_data(uploaded_file):
"""
Load data from the uploaded file based on its extension.
"""
try:
ext = os.path.splitext(uploaded_file.name)[1][1:].lower()
except:
ext = uploaded_file.split(".")[-1]
if ext in file_formats:
return file_formats[ext](uploaded_file)
else:
st.error(f"Unsupported file format: {ext}")
return None
st.title("Chat with your dataset")
st.info("Asking one question at a time will result in a better output")
uploaded_file = st.file_uploader(
"Upload a Data file",
type=list(file_formats.keys()),
help="Various File formats are Support",
on_change=clear_submit,
)
df = None # Initialize df to None outside the if block
if uploaded_file:
df = load_data(uploaded_file) # df will be assigned a value if uploaded_file is truthy
if df is None: # Check if df is still None before proceeding
st.warning("No data file uploaded or there was an error in loading the data.")
return # Exit the function early if df is None
openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
st.sidebar.info("If you face a KeyError: 'content' error, Press the clear conversation histroy button")
if "messages" not in st.session_state or st.sidebar.button("Clear conversation history"):
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
# Display previous chat messages
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="What is this data about?"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
# Check if OpenAI API key is provided
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
llm = ChatOpenAI(
temperature=0, model="gpt-3.5-turbo-0613", openai_api_key=openai_api_key, streaming=True
)
pandas_df_agent = create_pandas_dataframe_agent(
llm,
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = pandas_df_agent.run(st.session_state.messages, callbacks=[st_cb])
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response)
# Function to display transform products page
def transform_products():
import streamlit as st
import requests
import os
import replicate
import io
from PIL import Image
st.session_state['replicate_api_token'] = st.sidebar.text_input("Replicate API Token", type='password')
os.environ['REPLICATE_API_TOKEN'] = st.session_state['replicate_api_token']
if not st.session_state['replicate_api_token']:
st.sidebar.warning('Please enter your Replicate API Token to continue!!')
st.sidebar.info("You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)")
st.stop()
if st.session_state['replicate_api_token']:
st.info("This model works best with product images having transparent or plain backgrounds")
# Prompt user to upload an image file
img = st.file_uploader("Upload your product image", type=['png', 'jpg', 'jpeg'])
if img is not None:
has_plain_background = st.toggle("Does your product image have a plain or transparent background? If not, let us do the hard work for you!")
prompt = st.text_input("Enter Prompt", help="Enter something you imagine...")
negative_prompt = st.text_input("Enter Negative Prompt", help="Write what you don't want in the generated images")
submit = st.button("Submit")
if submit:
if has_plain_background:
# If image already has a plain background, prepare it for Replicate
image = Image.open(img)
bytes_obj = io.BytesIO()
image.save(bytes_obj, format='PNG')
bytes_obj.seek(0)
else:
# If image does not have a plain background, send it to ClipDrop to remove background
image_file_object = img.read()
r = requests.post('https://clipdrop-api.co/remove-background/v1',
files={
'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg')
},
headers={'x-api-key': st.secrets['clipdrop_api_key']}
)
if r.ok:
# If background removal is successful, prepare image for Replicate
image = Image.open(io.BytesIO(r.content))
bytes_obj = io.BytesIO()
image.save(bytes_obj, format='PNG')
bytes_obj.seek(0)
                    else:
                        # Show a friendly error instead of raising, then halt this run
                        st.error('Failed to remove background. Try again.')
                        st.stop()
# Send image to Replicate for transformation
output = replicate.run(
"logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df",
input={"image_path": bytes_obj, "prompt": prompt, "image_num": 4}
)
col1, col2 = st.columns(2)
with col1:
st.image(output[1])
st.image(output[2])
with col2:
st.image(output[3])
st.image(output[4])
# Function to generate images based on user input
def generate_images():
import streamlit as st
import replicate
import os
st.session_state['replicate_api_token'] = st.sidebar.text_input("Replicate API Token", type='password')
os.environ['REPLICATE_API_TOKEN'] = st.session_state['replicate_api_token']
if not st.session_state['replicate_api_token']:
st.sidebar.warning('Please enter your Replicate API Token to continue!!')
st.sidebar.info("You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)")
st.stop()
if st.session_state['replicate_api_token']:
prompt = st.text_input(
"Enter prompt",
help="Write something you can imagine..."
)
negative_prompt = st.text_input(
"Enter Negative prompt",
help="Write what you don't want to see in the generated images"
)
submit = st.button("Submit")
if submit:
output = replicate.run(
"stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f",
input={
"prompt": prompt,
"negative_prompt": negative_prompt,
"num_outputs": 4
},
)
col1, col2 = st.columns(2)
with col1:
st.image(output[0])
st.image(output[2])
with col2:
st.image(output[1])
st.image(output[3])
# Dictionary mapping page names to their functions
page_names_to_funcs = {
"Home 🏠": home,
"Chat with files 📁": chat_with_files,
"Chat with dataset 📖": chat_with_dataset,
"Use web search with LLMs 🌐": use_llms_with_web,
"Generate Images 🖌️": generate_images,
"Transform your products 🎨": transform_products,
}
# Display the page selected from the dictionary
demo_name = st.sidebar.selectbox("Choose a page to navigate to", page_names_to_funcs.keys())
page_names_to_funcs[demo_name]()
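# Usage note (assumption; not part of the original script): the app reads
# `assembly_api_key` and `clipdrop_api_key` from st.secrets, so a
# `.streamlit/secrets.toml` supplying those keys must be present, an `uploads/`
# directory must exist for cached files (CACHE_DIR above), and the script is
# launched with Streamlit, e.g. `streamlit run <this_file>.py`.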
| [
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.chat_models.ChatOpenAI",
"langchain.memory.chat_message_histories.StreamlitChatMessageHistory",
"langchain.memory.ConversationBufferMemory",
"langchain.agents.ConversationalChatAgent.from_llm_and_tools",
"langchain.tools.DuckDuckGoSearchRun",
"langchain.agents.create_pandas_dataframe_agent"
] | [((91, 213), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""QuickDigest AI"""', 'page_icon': '""":brain:"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title='QuickDigest AI', page_icon=':brain:', layout\n ='wide', initial_sidebar_state='expanded')\n", (109, 213), True, 'import streamlit as st\n'), ((540, 608), 'streamlit_extras.let_it_rain.rain', 'rain', ([], {'emoji': '"""🎈"""', 'font_size': '(54)', 'falling_speed': '(5)', 'animation_length': '"""1"""'}), "(emoji='🎈', font_size=54, falling_speed=5, animation_length='1')\n", (544, 608), False, 'from streamlit_extras.let_it_rain import rain\n'), ((717, 918), 'streamlit_extras.colored_header.colored_header', 'colored_header', ([], {'label': '"""QuickDigest AI🧠, Your Intelligent Data Companion"""', 'description': '"""~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop"""', 'color_name': '"""violet-70"""'}), "(label='QuickDigest AI🧠, Your Intelligent Data Companion',\n description=\n '~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop'\n , color_name='violet-70')\n", (731, 918), False, 'from streamlit_extras.colored_header import colored_header\n'), ((998, 1164), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen."""'], {}), "(\n 'Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen.'\n )\n", (1013, 1164), True, 'import streamlit as st\n'), ((1173, 1599), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq)."""'], {}), "(\n 'For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq).'\n )\n", (1188, 1599), True, 'import streamlit as st\n'), ((1608, 1738), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""LLMs may produce inaccurate information about people, places, or facts. Don\'t entirely trust them."""'], {}), '(\n "LLMs may produce inaccurate information about people, places, or facts. Don\'t entirely trust them."\n )\n', (1626, 1738), True, 'import streamlit as st\n'), ((1795, 2139), 'streamlit.markdown', 'st.markdown', (['"""<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>"""'], {'unsafe_allow_html': '(True)'}), "(\n '<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. 
QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>'\n , unsafe_allow_html=True)\n", (1806, 2139), True, 'import streamlit as st\n'), ((2156, 2585), 'streamlit.markdown', 'st.markdown', (['"""**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform\'s cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files."""'], {}), '(\n "**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform\'s cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files."\n )\n', (2167, 2585), True, 'import streamlit as st\n'), ((2594, 2930), 'streamlit.markdown', 'st.markdown', (['"""**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience."""'], {}), "(\n '**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience.'\n )\n", (2605, 2930), True, 'import streamlit as st\n'), ((2939, 3289), 'streamlit.markdown', 'st.markdown', (['"""**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI\'s real-time web search feature ensures you\'re always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time."""'], {}), '(\n "**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI\'s real-time web search feature ensures you\'re always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time."\n )\n', (2950, 3289), True, 'import streamlit as st\n'), ((3298, 3734), 'streamlit.markdown', 'st.markdown', (['"""**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes."""'], {}), "(\n '**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. 
Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes.'\n )\n", (3309, 3734), True, 'import streamlit as st\n'), ((3744, 3762), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (3755, 3762), True, 'import streamlit as st\n'), ((3835, 3893), 'streamlit.markdown', 'st.markdown', (['"""<h5>Support Us</h5>"""'], {'unsafe_allow_html': '(True)'}), "('<h5>Support Us</h5>', unsafe_allow_html=True)\n", (3846, 3893), True, 'import streamlit as st\n'), ((3923, 3936), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (3933, 3936), True, 'import streamlit as st\n'), ((5058, 5133), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Indexing documents...Please have patience"""'}), "(show_spinner='Indexing documents...Please have patience')\n", (5075, 5133), True, 'import streamlit as st\n'), ((10968, 11043), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter Youtube Video ID(Coming soon)"""'], {'disabled': '(True)'}), "('Enter Youtube Video ID(Coming soon)', disabled=True)\n", (10989, 11043), True, 'import streamlit as st\n'), ((11048, 11126), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter Spotify Podast link(Coming soon)"""'], {'disabled': '(True)'}), "('Enter Spotify Podast link(Coming soon)', disabled=True)\n", (11069, 11126), True, 'import streamlit as st\n'), ((13550, 13586), 'streamlit.title', 'st.title', (['"""Use web search with LLMs"""'], {}), "('Use web search with LLMs')\n", (13558, 13586), True, 'import streamlit as st\n'), ((13656, 13712), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (13677, 13712), True, 'import streamlit as st\n'), ((13770, 13799), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', (13797, 13799), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((13813, 13930), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (13837, 13930), False, 'from langchain.memory import ConversationBufferMemory\n'), ((16632, 16647), 'streamlit.cache_data', 'st.cache_data', ([], {}), '()\n', (16645, 16647), True, 'import streamlit as st\n'), ((17100, 17134), 'streamlit.title', 'st.title', (['"""Chat with your dataset"""'], {}), "('Chat with your dataset')\n", (17108, 17134), True, 'import streamlit as st\n'), ((17139, 17210), 'streamlit.info', 'st.info', (['"""Asking one question at a time will result in a better output"""'], {}), "('Asking one question at a time will result in a better output')\n", (17146, 17210), True, 'import streamlit as st\n'), ((17819, 17875), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (17840, 17875), True, 'import streamlit as st\n'), ((17885, 17998), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""If you face a KeyError: \'content\' error, Press the clear conversation histroy button"""'], {}), '(\n "If you face a KeyError: \'content\' error, Press the clear conversation histroy button"\n )\n', (17900, 17998), 
True, 'import streamlit as st\n'), ((19630, 19691), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Replicate API Token"""'], {'type': '"""password"""'}), "('Replicate API Token', type='password')\n", (19651, 19691), True, 'import streamlit as st\n'), ((22919, 22980), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Replicate API Token"""'], {'type': '"""password"""'}), "('Replicate API Token', type='password')\n", (22940, 22980), True, 'import streamlit as st\n'), ((3960, 4002), 'streamlit.write', 'st.write', (['"""Star this repository on Github"""'], {}), "('Star this repository on Github')\n", (3968, 4002), True, 'import streamlit as st\n'), ((4011, 4067), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""github"""', 'name': '"""codingis4noobs2/QuickDigest"""'}), "(type='github', name='codingis4noobs2/QuickDigest')\n", (4016, 4067), False, 'from streamlit_extras.badges import badge\n'), ((4091, 4123), 'streamlit.write', 'st.write', (['"""Follow me on twitter"""'], {}), "('Follow me on twitter')\n", (4099, 4123), True, 'import streamlit as st\n'), ((4132, 4172), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""twitter"""', 'name': '"""4gameparth"""'}), "(type='twitter', name='4gameparth')\n", (4137, 4172), False, 'from streamlit_extras.badges import badge\n'), ((4196, 4223), 'streamlit.write', 'st.write', (['"""Buy me a coffee"""'], {}), "('Buy me a coffee')\n", (4204, 4223), True, 'import streamlit as st\n'), ((4232, 4282), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""buymeacoffee"""', 'name': '"""codingis4noobs2"""'}), "(type='buymeacoffee', name='codingis4noobs2')\n", (4237, 4282), False, 'from streamlit_extras.badges import badge\n'), ((4306, 4397), 'streamlit.link_button', 'st.link_button', (['"""Upvote on Replit"""', '"""https://replit.com/@ParthShah38/QuickDigestAI?v=1"""'], {}), "('Upvote on Replit',\n 'https://replit.com/@ParthShah38/QuickDigestAI?v=1')\n", (4320, 4397), True, 'import streamlit as st\n'), ((5251, 5293), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (5282, 5293), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((5963, 5980), 'assemblyai.Transcriber', 'aai.Transcriber', ([], {}), '()\n', (5978, 5980), True, 'import assemblyai as aai\n'), ((6337, 6398), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': 'f"""Upload files"""', 'type': 'types'}), "(label=f'Upload files', type=types, **kwargs)\n", (6353, 6398), True, 'import streamlit as st\n'), ((7852, 7869), 'assemblyai.Transcriber', 'aai.Transcriber', ([], {}), '()\n', (7867, 7869), True, 'import assemblyai as aai\n'), ((8262, 8299), 'os.path.join', 'os.path.join', (['"""uploads"""', 'txt_filename'], {}), "('uploads', txt_filename)\n", (8274, 8299), False, 'import os\n'), ((8628, 8647), 'docx.Document', 'Document', (['file_path'], {}), '(file_path)\n', (8636, 8647), False, 'from docx import Document\n'), ((9524, 9543), 'PyPDF2.PdfReader', 'PdfReader', (['pdf_path'], {}), '(pdf_path)\n', (9533, 9543), False, 'from PyPDF2 import PdfReader\n'), ((9898, 9917), 'os.remove', 'os.remove', (['pdf_path'], {}), '(pdf_path)\n', (9907, 9917), False, 'import os\n'), ((10122, 10178), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (10143, 10178), True, 'import streamlit as st\n'), ((11195, 11229), 'streamlit.toast', 'st.toast', (['"""OpenAI API Key 
Added ✅"""'], {}), "('OpenAI API Key Added ✅')\n", (11203, 11229), True, 'import streamlit as st\n'), ((11535, 11637), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': "st.session_state['temperature']", 'model': '"""gpt-3.5-turbo"""', 'api_key': 'openai_api_key'}), "(temperature=st.session_state['temperature'], model='gpt-3.5-turbo',\n api_key=openai_api_key)\n", (11541, 11637), False, 'from llama_index.llms import OpenAI\n'), ((11656, 11695), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'openai_api_key'}), '(api_key=openai_api_key)\n', (11671, 11695), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((11722, 11784), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (11750, 11784), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((11793, 11836), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (11819, 11836), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((12378, 12414), 'streamlit.session_state.get', 'st.session_state.get', (['"""messages"""', '[]'], {}), "('messages', [])\n", (12398, 12414), True, 'import streamlit as st\n'), ((14009, 14048), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (14026, 14048), True, 'import streamlit as st\n'), ((14812, 14876), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Who won the 2022 Cricket World Cup?"""'}), "(placeholder='Who won the 2022 Cricket World Cup?')\n", (14825, 14876), True, 'import streamlit as st\n'), ((15159, 15248), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai_api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai_api_key,\n streaming=True)\n", (15169, 15248), False, 'from langchain.chat_models import ChatOpenAI\n'), ((15319, 15383), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (15361, 15383), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((15404, 15549), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (15438, 15549), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((17662, 17740), 'streamlit.warning', 'st.warning', (['"""No data file uploaded or there was an error in loading the data."""'], {}), "('No data file uploaded or there was an error in loading the data.')\n", (17672, 17740), True, 'import streamlit as st\n'), ((18034, 18081), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Clear conversation history"""'], {}), "('Clear conversation history')\n", (18051, 18081), True, 'import streamlit as st\n'), ((18341, 18394), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""What is this data about?"""'}), "(placeholder='What is this data about?')\n", (18354, 18394), True, 
'import streamlit as st\n'), ((18404, 18473), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (18436, 18473), True, 'import streamlit as st\n'), ((18701, 18806), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""', 'openai_api_key': 'openai_api_key', 'streaming': '(True)'}), "(temperature=0, model='gpt-3.5-turbo-0613', openai_api_key=\n openai_api_key, streaming=True)\n", (18711, 18806), False, 'from langchain.chat_models import ChatOpenAI\n'), ((18851, 18975), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', (['llm', 'df'], {'verbose': '(True)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)'}), '(llm, df, verbose=True, agent_type=AgentType.\n OPENAI_FUNCTIONS, handle_parsing_errors=True)\n', (18880, 18975), False, 'from langchain.agents import create_pandas_dataframe_agent\n'), ((19833, 19906), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please enter your Replicate API Token to continue!!"""'], {}), "('Please enter your Replicate API Token to continue!!')\n", (19851, 19906), True, 'import streamlit as st\n'), ((19915, 20045), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)"""'], {}), "(\n 'You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)'\n )\n", (19930, 20045), True, 'import streamlit as st\n'), ((20044, 20053), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (20051, 20053), True, 'import streamlit as st\n'), ((20119, 20221), 'streamlit.info', 'st.info', (['"""This model works best with product images having transparent or plain backgrounds"""'], {}), "(\n 'This model works best with product images having transparent or plain backgrounds'\n )\n", (20126, 20221), True, 'import streamlit as st\n'), ((20272, 20346), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your product image"""'], {'type': "['png', 'jpg', 'jpeg']"}), "('Upload your product image', type=['png', 'jpg', 'jpeg'])\n", (20288, 20346), True, 'import streamlit as st\n'), ((23122, 23195), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please enter your Replicate API Token to continue!!"""'], {}), "('Please enter your Replicate API Token to continue!!')\n", (23140, 23195), True, 'import streamlit as st\n'), ((23204, 23334), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)"""'], {}), "(\n 'You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)'\n )\n", (23219, 23334), True, 'import streamlit as st\n'), ((23333, 23342), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (23340, 23342), True, 'import streamlit as st\n'), ((23409, 23481), 'streamlit.text_input', 'st.text_input', (['"""Enter prompt"""'], {'help': '"""Write something you can imagine..."""'}), "('Enter prompt', help='Write something you can imagine...')\n", (23422, 23481), True, 'import streamlit as st\n'), ((23543, 23651), 'streamlit.text_input', 'st.text_input', (['"""Enter Negative prompt"""'], {'help': '"""Write what you don\'t want to see in the generated images"""'}), '(\'Enter Negative prompt\', help=\n "Write what you don\'t want to see in the generated images")\n', (23556, 23651), 
True, 'import streamlit as st\n'), ((23699, 23718), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (23708, 23718), True, 'import streamlit as st\n'), ((5786, 5818), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5801, 5818), True, 'import streamlit as st\n'), ((5832, 5857), 'streamlit.write', 'st.write', (["message['text']"], {}), "(message['text'])\n", (5840, 5857), True, 'import streamlit as st\n'), ((6455, 6540), 'streamlit.info', 'st.info', (['f"""Please add documents, Note: Scanned documents are not supported yet!"""'], {}), "(f'Please add documents, Note: Scanned documents are not supported yet!'\n )\n", (6462, 6540), True, 'import streamlit as st\n'), ((6548, 6557), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (6555, 6557), True, 'import streamlit as st\n'), ((9862, 9888), 'os.path.basename', 'os.path.basename', (['pdf_path'], {}), '(pdf_path)\n', (9878, 9888), False, 'import os\n'), ((10222, 10288), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please add your OpenAI API key to continue!!"""'], {}), "('Please add your OpenAI API key to continue!!')\n", (10240, 10288), True, 'import streamlit as st\n'), ((10301, 10359), 'streamlit.warning', 'st.warning', (['"""Please add your OpenAI API key to continue!!"""'], {}), "('Please add your OpenAI API key to continue!!')\n", (10311, 10359), True, 'import streamlit as st\n'), ((10372, 10890), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you\'re unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). While obtaining the API key doesn\'t require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services."""'], {}), '(\n "To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you\'re unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). 
While obtaining the API key doesn\'t require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services."\n )\n', (10387, 10890), True, 'import streamlit as st\n'), ((10893, 10902), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (10900, 10902), True, 'import streamlit as st\n'), ((12158, 12194), 'streamlit.warning', 'st.warning', (['"""No documents uploaded!"""'], {}), "('No documents uploaded!')\n", (12168, 12194), True, 'import streamlit as st\n'), ((12207, 12216), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (12214, 12216), True, 'import streamlit as st\n'), ((12603, 12618), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (12616, 12618), True, 'import streamlit as st\n'), ((14310, 14344), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (14325, 14344), True, 'import streamlit as st\n'), ((14735, 14756), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (14743, 14756), True, 'import streamlit as st\n'), ((15016, 15070), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (15023, 15070), True, 'import streamlit as st\n'), ((15083, 15092), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (15090, 15092), True, 'import streamlit as st\n'), ((15262, 15296), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Search"""'}), "(name='Search')\n", (15281, 15296), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((15630, 15658), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (15645, 15658), True, 'import streamlit as st\n'), ((15819, 15847), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (15827, 15847), True, 'import streamlit as st\n'), ((17027, 17070), 'streamlit.error', 'st.error', (['f"""Unsupported file format: {ext}"""'], {}), "(f'Unsupported file format: {ext}')\n", (17035, 17070), True, 'import streamlit as st\n'), ((18609, 18663), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (18616, 18663), True, 'import streamlit as st\n'), ((18676, 18685), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (18683, 18685), True, 'import streamlit as st\n'), ((19056, 19084), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (19071, 19084), True, 'import streamlit as st\n'), ((19275, 19351), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (19307, 19351), True, 'import streamlit as st\n'), ((19364, 19382), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (19372, 19382), True, 'import streamlit as st\n'), ((20411, 20538), 'streamlit.toggle', 'st.toggle', (['"""Does your product image have a plain or transparent background? If not, let us do the hard work for you!"""'], {}), "(\n 'Does your product image have a plain or transparent background? 
If not, let us do the hard work for you!'\n )\n", (20420, 20538), True, 'import streamlit as st\n'), ((20550, 20618), 'streamlit.text_input', 'st.text_input', (['"""Enter Prompt"""'], {'help': '"""Enter something you imagine..."""'}), "('Enter Prompt', help='Enter something you imagine...')\n", (20563, 20618), True, 'import streamlit as st\n'), ((20649, 20750), 'streamlit.text_input', 'st.text_input', (['"""Enter Negative Prompt"""'], {'help': '"""Write what you don\'t want in the generated images"""'}), '(\'Enter Negative Prompt\', help=\n "Write what you don\'t want in the generated images")\n', (20662, 20750), True, 'import streamlit as st\n'), ((20767, 20786), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (20776, 20786), True, 'import streamlit as st\n'), ((23760, 23953), 'replicate.run', 'replicate.run', (['"""stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f"""'], {'input': "{'prompt': prompt, 'negative_prompt': negative_prompt, 'num_outputs': 4}"}), "(\n 'stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f'\n , input={'prompt': prompt, 'negative_prompt': negative_prompt,\n 'num_outputs': 4})\n", (23773, 23953), False, 'import replicate\n'), ((24092, 24105), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (24102, 24105), True, 'import streamlit as st\n'), ((5182, 5222), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'files'}), '(input_files=files)\n', (5203, 5222), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((7594, 7623), 'streamlit.expander', 'st.expander', (['"""Uploaded Files"""'], {}), "('Uploaded Files')\n", (7605, 7623), True, 'import streamlit as st\n'), ((7729, 7763), 'streamlit.markdown', 'st.markdown', (['f"""{filepaths_pretty}"""'], {}), "(f'{filepaths_pretty}')\n", (7740, 7763), True, 'import streamlit as st\n'), ((8200, 8226), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (8216, 8226), False, 'import os\n'), ((11305, 11337), 'streamlit.expander', 'st.expander', (['"""Advanced Settings"""'], {}), "('Advanced Settings')\n", (11316, 11337), True, 'import streamlit as st\n'), ((11389, 11530), 'streamlit.number_input', 'st.number_input', (['"""Enter Temperature"""'], {'help': '"""It determines how creative the model should be"""', 'min_value': '(0.0)', 'max_value': '(1.0)', 'value': '(0.1)'}), "('Enter Temperature', help=\n 'It determines how creative the model should be', min_value=0.0,\n max_value=1.0, value=0.1)\n", (11404, 11530), True, 'import streamlit as st\n'), ((12770, 12798), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (12785, 12798), True, 'import streamlit as st\n'), ((14886, 14909), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (14901, 14909), True, 'import streamlit as st\n'), ((15705, 15719), 'streamlit.container', 'st.container', ([], {}), '()\n', (15717, 15719), True, 'import streamlit as st\n'), ((18272, 18300), 'streamlit.chat_message', 'st.chat_message', (["msg['role']"], {}), "(msg['role'])\n", (18287, 18300), True, 'import streamlit as st\n'), ((18482, 18505), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (18497, 18505), True, 'import streamlit as st\n'), ((19131, 19145), 'streamlit.container', 'st.container', ([], {}), '()\n', (19143, 19145), True, 'import streamlit as st\n'), ((22251, 22429), 'replicate.run', 'replicate.run', 
(['"""logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df"""'], {'input': "{'image_path': bytes_obj, 'prompt': prompt, 'image_num': 4}"}), "(\n 'logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df'\n , input={'image_path': bytes_obj, 'prompt': prompt, 'image_num': 4})\n", (22264, 22429), False, 'import replicate\n'), ((22507, 22520), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (22517, 22520), True, 'import streamlit as st\n'), ((24145, 24164), 'streamlit.image', 'st.image', (['output[0]'], {}), '(output[0])\n', (24153, 24164), True, 'import streamlit as st\n'), ((24181, 24200), 'streamlit.image', 'st.image', (['output[2]'], {}), '(output[2])\n', (24189, 24200), True, 'import streamlit as st\n'), ((24240, 24259), 'streamlit.image', 'st.image', (['output[1]'], {}), '(output[1])\n', (24248, 24259), True, 'import streamlit as st\n'), ((24276, 24295), 'streamlit.image', 'st.image', (['output[3]'], {}), '(output[3])\n', (24284, 24295), True, 'import streamlit as st\n'), ((12899, 12909), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (12907, 12909), True, 'import streamlit as st\n'), ((14569, 14641), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (14578, 14641), True, 'import streamlit as st\n'), ((14663, 14684), 'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (14671, 14684), True, 'import streamlit as st\n'), ((14705, 14722), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (14713, 14722), True, 'import streamlit as st\n'), ((20968, 20983), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (20978, 20983), False, 'from PIL import Image\n'), ((21016, 21028), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (21026, 21028), False, 'import io\n'), ((21326, 21530), 'requests.post', 'requests.post', (['"""https://clipdrop-api.co/remove-background/v1"""'], {'files': "{'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg')}", 'headers': "{'x-api-key': st.secrets['clipdrop_api_key']}"}), "('https://clipdrop-api.co/remove-background/v1', files={\n 'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg')},\n headers={'x-api-key': st.secrets['clipdrop_api_key']})\n", (21339, 21530), False, 'import requests\n'), ((22568, 22587), 'streamlit.image', 'st.image', (['output[1]'], {}), '(output[1])\n', (22576, 22587), True, 'import streamlit as st\n'), ((22608, 22627), 'streamlit.image', 'st.image', (['output[2]'], {}), '(output[2])\n', (22616, 22627), True, 'import streamlit as st\n'), ((22675, 22694), 'streamlit.image', 'st.image', (['output[3]'], {}), '(output[3])\n', (22683, 22694), True, 'import streamlit as st\n'), ((22715, 22734), 'streamlit.image', 'st.image', (['output[4]'], {}), '(output[4])\n', (22723, 22734), True, 'import streamlit as st\n'), ((9163, 9184), 'os.path.basename', 'os.path.basename', (['doc'], {}), '(doc)\n', (9179, 9184), False, 'import os\n'), ((21868, 21880), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (21878, 21880), False, 'import io\n'), ((22078, 22129), 'streamlit.error', 'st.error', (['"""Failed to remove background. Try again."""'], {}), "('Failed to remove background. 
Try again.')\n", (22086, 22129), True, 'import streamlit as st\n'), ((22154, 22163), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (22161, 22163), True, 'import streamlit as st\n'), ((16802, 16838), 'os.path.splitext', 'os.path.splitext', (['uploaded_file.name'], {}), '(uploaded_file.name)\n', (16818, 16838), False, 'import os\n'), ((21809, 21830), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (21819, 21830), False, 'import io\n')] |
from time import monotonic
from rich.console import Console
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
class Experiment:
"""
A class representing an experiment.
Attributes:
params (dict): A dictionary containing experiment parameters.
index (int): An integer representing the index of the experiment.
documents (list): A list of documents to be used in the experiment.
vectorstore_list (list): A list of vector stores used in the experiment.
retrievers_list (list): A list of retrievers used in the experiment.
chain (object): An object representing the chain used in the experiment.
query_results (dict): A dictionary containing the results of the queries.
run_time (float): A float representing the time it took to run the experiment.
embedding_time (float): A float representing the time it took to embed the documents.
Methods:
run(eval_queries): Runs the experiment with the given evaluation queries.
_evaluate(chain, eval_queries): Evaluates the experiment with the given chain and evaluation queries.
"""
def __init__(
self,
params,
index=0,
documents=None,
vectorstore_list=None,
retrievers_list=None,
chain=None,
):
"""
Initializes an Experiment object.
Args:
params (dict): A dictionary containing experiment parameters.
index (int): An integer representing the index of the experiment.
documents (list): A list of documents to be used in the experiment.
vectorstore_list (list): A list of vector stores used in the experiment.
retrievers_list (list): A list of retrievers used in the experiment.
chain (object): An object representing the chain used in the experiment.
"""
self.params = params
self.index = index
self.console = Console()
self.documents = documents
self.vectorstore_list = vectorstore_list
self.retrievers_list = retrievers_list
self.chain = chain
self.query_results = {}
# time variables
self.run_time = 0
self.embedding_time = 0
def run(self, eval_queries):
"""
Runs the experiment with the given evaluation queries.
Args:
eval_queries (list): A list of evaluation queries.
Returns:
A tuple containing the query results, the run time, and the embedding time.
"""
self.console.log(f"Experiment {self.index} started")
self.start_time = monotonic()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=self.params["chunk_size"], chunk_overlap=0
)
texts = text_splitter.split_documents(self.documents)
self.pre_embedding = monotonic()
db = self.params["vector_store"].from_documents(
texts, self.params["embeddings"]
)
self.post_embedding = monotonic()
self.vectorstore_list.append(db)
retriever = db.as_retriever(search_kwargs={"k": 2})
self.retrievers_list.append(retriever)
qa = self.chain.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever
)
self.pre_queries = monotonic()
self._evaluate(qa, eval_queries)
return self.query_results, self.run_time, self.embedding_time
def _evaluate(self, chain, eval_queries):
"""
Evaluates the experiment with the given chain and evaluation queries.
Args:
chain (object): An object representing the chain used in the experiment.
eval_queries (list): A list of evaluation queries.
"""
self.console.log(f"Evaluating experiment {self.index}")
for q in eval_queries:
res = chain.run(q)
self.query_results[q] = res
post_queries = monotonic()
self.console.log(f"Finished experiment {self.index}")
self.run_time = post_queries - self.start_time
self.embedding_time = self.post_embedding - self.pre_embedding
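# Usage sketch (not part of the original module; FAISS, OpenAIEmbeddings and RetrievalQA are
# merely illustrative choices for the `params`/`chain` slots — any vector store exposing
# `from_documents` and any chain exposing `from_chain_type` would fit):
#
#   from langchain.chains import RetrievalQA
#   from langchain.document_loaders import TextLoader
#   from langchain.embeddings import OpenAIEmbeddings
#   from langchain.vectorstores import FAISS
#
#   docs = TextLoader("example.txt").load()
#   params = {"chunk_size": 500, "vector_store": FAISS, "embeddings": OpenAIEmbeddings()}
#   experiment = Experiment(params, index=0, documents=docs,
#                           vectorstore_list=[], retrievers_list=[], chain=RetrievalQA)
#   query_results, run_time, embedding_time = experiment.run(["What is the document about?"])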
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI"
] | [((1970, 1979), 'rich.console.Console', 'Console', ([], {}), '()\n', (1977, 1979), False, 'from rich.console import Console\n'), ((2643, 2654), 'time.monotonic', 'monotonic', ([], {}), '()\n', (2652, 2654), False, 'from time import monotonic\n'), ((2680, 2769), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': "self.params['chunk_size']", 'chunk_overlap': '(0)'}), "(chunk_size=self.params['chunk_size'],\n chunk_overlap=0)\n", (2710, 2769), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2880, 2891), 'time.monotonic', 'monotonic', ([], {}), '()\n', (2889, 2891), False, 'from time import monotonic\n'), ((3034, 3045), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3043, 3045), False, 'from time import monotonic\n'), ((3341, 3352), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3350, 3352), False, 'from time import monotonic\n'), ((3964, 3975), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3973, 3975), False, 'from time import monotonic\n'), ((3253, 3261), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (3259, 3261), False, 'from langchain.llms import OpenAI\n')] |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import re
import torch
import gradio as gr
from clc.langchain_application import LangChainApplication, torch_gc
from transformers import StoppingCriteriaList
from clc.callbacks import Iteratorize, Stream
from clc.matching import key_words_match_intention, key_words_match_knowledge
from langchain.schema import Document
# For debugging
# os.chdir("../../../")
class LangChainCFG:
    llm_model_name = 'luwen_baichuan/output/zju_model_0813_100k'  # local model path or huggingface remote repo
    embedding_model_name = 'app/langchain_demo/model/text2vec'  # retrieval model path or huggingface remote repo
vector_store_path = 'app/langchain_demo/data/cache/legal_articles'
kg_vector_stores = {
'法律法条': 'app/langchain_demo/data/cache/legal_articles',
'法律书籍': 'app/langchain_demo/data/cache/legal_books',
'法律文书模板':'app/langchain_demo/data/cache/legal_templates',
'法律案例': 'app/langchain_demo/data/cache/legal_cases',
'法律考试': 'app/langchain_demo/data/cache/judicialExamination',
'日常法律问答': 'app/langchain_demo/data/cache/legal_QA',
}
config = LangChainCFG()
application = LangChainApplication(config)
def clear_session():
return '', None, ""
def predict(input,
kg_names=None,
history=None,
intention_reg=None,
**kwargs):
max_length=1024
top_k = 1
application.llm_service.max_token = max_length
# print(input)
    if history is None:
history = []
search_text = ''
now_input = input
eos_token_ids = [application.llm_service.tokenizer.eos_token_id]
application.llm_service.history = history[-5:]
max_memory = 4096 - max_length
if intention_reg==["意图识别"]:
auto_kg_names = key_words_match_intention(input)
if len(auto_kg_names)==0:
search_text += "意图识别没有匹配到知识库。\n\n"
else:
match_kg_names = "、".join(list(auto_kg_names))
search_text += "意图识别匹配到知识库是:"+match_kg_names+"。\n\n"
kg_names = list(set(kg_names) | auto_kg_names)
kb_based = True if len(kg_names) != 0 else False
if len(history) != 0:
input = "".join(["</s>Human:" + i[0] + " </s>Assistant: " + i[1] for i in application.llm_service.history]) + \
"</s>Human:" + input
input = input[len("</s>Human:"):]
if len(input) > max_memory:
input = input[-max_memory:]
if kb_based:
related_docs_with_score_seq = []
for kg_name in kg_names:
if kg_name=="法律法条":
related_article = key_words_match_knowledge(application.all_articles, application.choices, now_input)
if related_article:
kg_matches = [(Document(page_content=related_article[0], metadata={"value": related_article[1]}),0)]
else:
application.source_service.load_vector_store(application.config.kg_vector_stores[kg_name])
kg_matches = application.source_service.vector_store.similarity_search_with_score(input, k=top_k)
else:
application.source_service.load_vector_store(application.config.kg_vector_stores[kg_name])
kg_matches = application.source_service.vector_store.similarity_search_with_score(input, k=top_k)
related_docs_with_score_seq.append(kg_matches)
related_docs_with_score = related_docs_with_score_seq
if len(related_docs_with_score) > 0:
input, context_with_score = application.generate_prompt(related_docs_with_score, input,kg_names)
search_text += context_with_score
torch_gc()
print("histroy in call: ", history)
prompt = f'</s>Human:{input} </s>Assistant: '
print("prompt: ",prompt)
inputs = application.llm_service.tokenizer(prompt, return_tensors="pt").to('cuda')
stopping_criteria = StoppingCriteriaList()
kwargs['inputs'] = inputs
kwargs['max_new_tokens'] = max_length
kwargs['repetition_penalty'] = float(1.2)
kwargs['stopping_criteria'] = stopping_criteria
history.append((now_input, ""))
def generate_with_callback(callback=None, **kwargs):
kwargs['stopping_criteria'].append(Stream(callback_func=callback))
with torch.no_grad():
application.llm_service.model.generate(**kwargs['inputs'],
max_new_tokens=kwargs['max_new_tokens'],
repetition_penalty=kwargs['repetition_penalty'],
stopping_criteria=kwargs["stopping_criteria"])
def generate_with_streaming(**kwargs):
return Iteratorize(generate_with_callback, kwargs, callback=None)
with generate_with_streaming(**kwargs) as generator:
for output in generator:
last = output[-1]
output = application.llm_service.tokenizer.decode(output, skip_special_tokens=True)
pattern = r"\n{5,}$"
pattern2 = r"\s{5,}$"
origin_output = output
output = output.split("Assistant:")[-1].strip()
history[-1] = (now_input, output)
yield "", history, history, search_text
if last in eos_token_ids or re.search(pattern, origin_output) or re.search(pattern2, origin_output):
break
with gr.Blocks() as demo:
state = gr.State()
with gr.Row():
with gr.Column(scale=1.5):
github_banner_path = 'https://raw.githubusercontent.com/LIANG-star177/chatgptapi/master/logo.png'
gr.HTML(f'<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>')
with gr.Row():
intention_reg = gr.CheckboxGroup(["意图识别"],
label="自动选择知识库",
value=None,
interactive=True)
with gr.Row():
kg_names = gr.CheckboxGroup(list(config.kg_vector_stores.keys()),
label="手动选择知识库",
value=None,
interactive=True).style(height=200)
with gr.Row():
search = gr.Textbox(label='知识库检索结果')
with gr.Row():
gr.Markdown("""Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区""")
with gr.Row():
gr.Markdown("""免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。""")
with gr.Column(scale=4):
with gr.Row():
chatbot = gr.Chatbot(label='智海-录问').style(height=500)
with gr.Row():
message = gr.Textbox(label='请输入问题')
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话")
send = gr.Button("🚀 发送")
send.click(predict,
inputs=[
message,
kg_names,
state,
intention_reg,
],
outputs=[message, chatbot, state, search],
show_progress=True)
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state, search],
queue=False)
message.submit(predict,
inputs=[
message,
kg_names,
state,
intention_reg,
],
outputs=[message, chatbot, state, search],
show_progress=True)
demo.queue(concurrency_count=2).launch(
server_name='0.0.0.0',
server_port=7888,
share=True,
enable_queue=True,
inbrowser=True,
)
| [
"langchain.schema.Document"
] | [((1155, 1183), 'clc.langchain_application.LangChainApplication', 'LangChainApplication', (['config'], {}), '(config)\n', (1175, 1183), False, 'from clc.langchain_application import LangChainApplication, torch_gc\n'), ((3620, 3630), 'clc.langchain_application.torch_gc', 'torch_gc', ([], {}), '()\n', (3628, 3630), False, 'from clc.langchain_application import LangChainApplication, torch_gc\n'), ((3863, 3885), 'transformers.StoppingCriteriaList', 'StoppingCriteriaList', ([], {}), '()\n', (3883, 3885), False, 'from transformers import StoppingCriteriaList, StoppingCriteriaList\n'), ((5358, 5369), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (5367, 5369), True, 'import gradio as gr\n'), ((5392, 5402), 'gradio.State', 'gr.State', ([], {}), '()\n', (5400, 5402), True, 'import gradio as gr\n'), ((1763, 1795), 'clc.matching.key_words_match_intention', 'key_words_match_intention', (['input'], {}), '(input)\n', (1788, 1795), False, 'from clc.matching import key_words_match_intention, key_words_match_knowledge\n'), ((4681, 4739), 'clc.callbacks.Iteratorize', 'Iteratorize', (['generate_with_callback', 'kwargs'], {'callback': 'None'}), '(generate_with_callback, kwargs, callback=None)\n', (4692, 4739), False, 'from clc.callbacks import Iteratorize, Stream\n'), ((5412, 5420), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5418, 5420), True, 'import gradio as gr\n'), ((4194, 4224), 'clc.callbacks.Stream', 'Stream', ([], {'callback_func': 'callback'}), '(callback_func=callback)\n', (4200, 4224), False, 'from clc.callbacks import Iteratorize, Stream\n'), ((4239, 4254), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4252, 4254), False, 'import torch\n'), ((5435, 5455), 'gradio.Column', 'gr.Column', ([], {'scale': '(1.5)'}), '(scale=1.5)\n', (5444, 5455), True, 'import gradio as gr\n'), ((5579, 5758), 'gradio.HTML', 'gr.HTML', (['f"""<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>"""'], {}), '(\n f\'<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>\'\n )\n', (5586, 5758), True, 'import gradio as gr\n'), ((6552, 6570), 'gradio.Column', 'gr.Column', ([], {'scale': '(4)'}), '(scale=4)\n', (6561, 6570), True, 'import gradio as gr\n'), ((2568, 2655), 'clc.matching.key_words_match_knowledge', 'key_words_match_knowledge', (['application.all_articles', 'application.choices', 'now_input'], {}), '(application.all_articles, application.choices,\n now_input)\n', (2593, 2655), False, 'from clc.matching import key_words_match_intention, key_words_match_knowledge\n'), ((5257, 5290), 're.search', 're.search', (['pattern', 'origin_output'], {}), '(pattern, origin_output)\n', (5266, 5290), False, 'import re\n'), ((5294, 5328), 're.search', 're.search', (['pattern2', 'origin_output'], {}), '(pattern2, origin_output)\n', (5303, 5328), False, 'import re\n'), ((5766, 5774), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5772, 5774), True, 'import gradio as gr\n'), ((5815, 5888), 'gradio.CheckboxGroup', 'gr.CheckboxGroup', (["['意图识别']"], {'label': '"""自动选择知识库"""', 'value': 'None', 'interactive': '(True)'}), "(['意图识别'], label='自动选择知识库', value=None, interactive=True)\n", (5831, 5888), True, 'import gradio as gr\n'), ((5978, 5986), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5984, 5986), True, 'import gradio as gr\n'), ((6248, 6256), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6254, 6256), True, 'import gradio as 
gr\n'), ((6283, 6310), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""知识库检索结果"""'}), "(label='知识库检索结果')\n", (6293, 6310), True, 'import gradio as gr\n'), ((6329, 6337), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6335, 6337), True, 'import gradio as gr\n'), ((6355, 6403), 'gradio.Markdown', 'gr.Markdown', (['"""Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区"""'], {}), "('Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区')\n", (6366, 6403), True, 'import gradio as gr\n'), ((6425, 6433), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6431, 6433), True, 'import gradio as gr\n'), ((6451, 6539), 'gradio.Markdown', 'gr.Markdown', (['"""免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。"""'], {}), "(\n '免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。')\n", (6462, 6539), True, 'import gradio as gr\n'), ((6589, 6597), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6595, 6597), True, 'import gradio as gr\n'), ((6698, 6706), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6704, 6706), True, 'import gradio as gr\n'), ((6734, 6759), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""请输入问题"""'}), "(label='请输入问题')\n", (6744, 6759), True, 'import gradio as gr\n'), ((6789, 6797), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6795, 6797), True, 'import gradio as gr\n'), ((6831, 6852), 'gradio.Button', 'gr.Button', (['"""🧹 清除历史对话"""'], {}), "('🧹 清除历史对话')\n", (6840, 6852), True, 'import gradio as gr\n'), ((6876, 6893), 'gradio.Button', 'gr.Button', (['"""🚀 发送"""'], {}), "('🚀 发送')\n", (6885, 6893), True, 'import gradio as gr\n'), ((6625, 6650), 'gradio.Chatbot', 'gr.Chatbot', ([], {'label': '"""智海-录问"""'}), "(label='智海-录问')\n", (6635, 6650), True, 'import gradio as gr\n'), ((2723, 2808), 'langchain.schema.Document', 'Document', ([], {'page_content': 'related_article[0]', 'metadata': "{'value': related_article[1]}"}), "(page_content=related_article[0], metadata={'value':\n related_article[1]})\n", (2731, 2808), False, 'from langchain.schema import Document\n')] |
"""This script is used to initialize the Qdrant db backend with Azure OpenAI."""
import os
from typing import Any, List, Optional, Tuple
import openai
from dotenv import load_dotenv
from langchain.docstore.document import Document
from langchain.text_splitter import NLTKTextSplitter
from langchain_community.document_loaders import DirectoryLoader, PyPDFium2Loader
from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
from langchain_community.vectorstores import Qdrant
from loguru import logger
from omegaconf import DictConfig
from ultra_simple_config import load_config
from agent.utils.utility import generate_prompt
from agent.utils.vdb import init_vdb
load_dotenv()
@load_config(location="config/db.yml")
def get_db_connection(open_ai_token: str, cfg: DictConfig, collection_name: str) -> Qdrant:
"""Initializes a connection to the Qdrant DB.
Args:
open_ai_token (str): The openai token.
cfg (DictConfig): the config file.
collection_name (str): The name of the vector database collection.
Returns:
        Qdrant: A LangChain instance of the Qdrant DB.
"""
if cfg.openai.azure:
embedding = AzureOpenAIEmbeddings(deployment=cfg.openai.deployment, openai_api_version="2023-05-15", openai_api_key=open_ai_token) # type: ignore
else:
embedding = OpenAIEmbeddings(model=cfg.openai.deployment, openai_api_key=open_ai_token)
if collection_name is None or not collection_name:
collection_name = cfg.qdrant.collection_name_openai
return init_vdb(cfg, collection_name, embedding)
def embedd_documents_openai(dir: str, open_ai_token: str, collection_name: Optional[str] = None) -> None:
"""embedd_documents embedds the documents in the given directory.
:param cfg: Configuration from the file
:type cfg: DictConfig
:param dir: PDF Directory
:type dir: str
:param open_ai_token: OpenAI API Token
:type open_ai_token: str
"""
vector_db: Qdrant = get_db_connection(open_ai_token=open_ai_token, collection_name=collection_name)
splitter = NLTKTextSplitter(chunk_size=500, chunk_overlap=100)
loader = DirectoryLoader(dir, glob="*.pdf", loader_cls=PyPDFium2Loader)
docs = loader.load_and_split(splitter)
logger.info(f"Loaded {len(docs)} documents.")
texts = [doc.page_content for doc in docs]
metadatas = [doc.metadata for doc in docs]
vector_db.add_texts(texts=texts, metadatas=metadatas)
logger.info("SUCCESS: Texts embedded.")
def search_documents_openai(open_ai_token: str, query: str, amount: int, threshold: float = 0.0, collection_name: Optional[str] = None) -> List[Tuple[Document, float]]:
"""Searches the documents in the Qdrant DB with a specific query.
Args:
open_ai_token (str): The OpenAI API token.
        query (str): The question for which documents should be searched.
        amount (int): The maximum number of documents to return.
        threshold (float): The minimum similarity score a result must reach.
        collection_name (Optional[str]): The name of the vector database collection.
Returns:
List[Tuple[Document, float]]: A list of search results, where each result is a tuple
containing a Document object and a float score.
"""
vector_db = get_db_connection(open_ai_token=open_ai_token, collection_name=collection_name)
docs = vector_db.similarity_search_with_score(query, k=amount, score_threshold=threshold)
logger.info("SUCCESS: Documents found.")
return docs
@load_config(location="config/ai/openai.yml")
def summarize_text_openai(text: str, token: str, cfg: DictConfig) -> str:
"""Summarizes the given text using the Luminous API.
Args:
text (str): The text to be summarized.
token (str): The token for the Luminous API.
Returns:
str: The summary of the text.
"""
prompt = generate_prompt(prompt_name="openai-summarization.j2", text=text, language="de")
openai.api_key = token
response = openai.Completion.create(
engine=cfg.openai.model,
prompt=prompt,
temperature=cfg.openai.temperature,
max_tokens=cfg.openai.max_tokens,
top_p=cfg.openai.top_p,
frequency_penalty=cfg.openai.frequency_penalty,
presence_penalty=cfg.openai.presence_penalty,
best_of=cfg.openai.best_of,
stop=cfg.openai.stop,
)
return response.choices[0].text
@load_config(location="config/ai/openai.yml")
def send_completion(text: str, query: str, token: str, cfg: DictConfig) -> str:
"""Sent completion request to OpenAI API.
Args:
text (str): The text on which the completion should be based.
query (str): The query for the completion.
token (str): The token for the OpenAI API.
cfg (DictConfig):
Returns:
str: Response from the OpenAI API.
"""
prompt = generate_prompt(prompt_name="openai-summarization.j2", text=text, query=query, language="de")
openai.api_key = token
response = openai.Completion.create(
engine=cfg.openai.model,
prompt=prompt,
temperature=cfg.openai.temperature,
max_tokens=cfg.openai.max_tokens,
top_p=cfg.openai.top_p,
frequency_penalty=cfg.openai.frequency_penalty,
presence_penalty=cfg.openai.presence_penalty,
best_of=cfg.openai.best_of,
stop=cfg.openai.stop,
)
return response.choices[0].text
def send_custom_completion_openai(
token: str,
prompt: str,
model: str = "gpt3.5",
max_tokens: int = 256,
stop_sequences: List[str] = ["###"],
temperature: float = 0,
) -> str:
"""Sent completion request to OpenAI API.
Args:
text (str): The text on which the completion should be based.
query (str): The query for the completion.
token (str): The token for the OpenAI API.
cfg (DictConfig):
Returns:
str: Response from the OpenAI API.
"""
openai.api_key = token
response = openai.Completion.create(
engine=model,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens,
        stop=stop_sequences,
)
return response.choices[0].text
def qa_openai(token: str, documents: list[tuple[Document, float]], query: str, summarization: bool = False) -> tuple[Any, str, dict[Any, Any]]:
"""QA Function for OpenAI LLMs.
Args:
token (str): The token for the OpenAI API.
documents (list[tuple[Document, float]]): The documents to be searched.
query (str): The question for which the LLM should generate an answer.
summarization (bool, optional): If the Documents should be summarized. Defaults to False.
Returns:
tuple: answer, prompt, meta_data
"""
# if the list of documents contains only one document extract the text directly
if len(documents) == 1:
text = documents[0][0].page_content
meta_data = documents[0][0].metadata
else:
# extract the text from the documents
texts = [doc[0].page_content for doc in documents]
if summarization:
# call summarization
text = ""
for t in texts:
text += summarize_text_openai(text=t, token=token)
else:
# combine the texts to one text
text = " ".join(texts)
meta_data = [doc[0].metadata for doc in documents]
# load the prompt
prompt = generate_prompt("aleph_alpha_qa.j2", text=text, query=query)
try:
        # call the OpenAI API
        answer = send_completion(text=text, query=query, token=token)
except ValueError as e:
# if the code is PROMPT_TOO_LONG, split it into chunks
if e.args[0] == "PROMPT_TOO_LONG":
logger.info("Prompt too long. Summarizing.")
# summarize the text
short_text = summarize_text_openai(text, token)
# generate the prompt
prompt = generate_prompt("openai-qa.j2", text=short_text, query=query)
# call the luminous api
answer = send_completion(prompt, token)
# extract the answer
return answer, prompt, meta_data
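# Illustrative end-to-end use of qa_openai (hypothetical; mirrors the demo in __main__ below):
#   docs = search_documents_openai(open_ai_token=token, query="Was ist Vanille?", amount=3)
#   answer, prompt, meta_data = qa_openai(token=token, documents=docs, query="Was ist Vanille?")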
if __name__ == "__main__":
token = os.getenv("OPENAI_API_KEY")
if not token:
raise ValueError("OPENAI_API_KEY is not set.")
embedd_documents_openai(dir="data", open_ai_token=token)
DOCS = search_documents_openai(open_ai_token="", query="Was ist Vanille?", amount=3)
print(f"DOCUMENTS: {DOCS}")
summary = summarize_text_openai(text="Below is an extract from the annual financial report of a company. ", token=token)
print(f"SUMMARY: {summary}")
| [
"langchain_community.embeddings.OpenAIEmbeddings",
"langchain_community.embeddings.AzureOpenAIEmbeddings",
"langchain.text_splitter.NLTKTextSplitter",
"langchain_community.document_loaders.DirectoryLoader"
] | [((692, 705), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (703, 705), False, 'from dotenv import load_dotenv\n'), ((709, 746), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/db.yml"""'}), "(location='config/db.yml')\n", (720, 746), False, 'from ultra_simple_config import load_config\n'), ((3319, 3363), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/ai/openai.yml"""'}), "(location='config/ai/openai.yml')\n", (3330, 3363), False, 'from ultra_simple_config import load_config\n'), ((4225, 4269), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/ai/openai.yml"""'}), "(location='config/ai/openai.yml')\n", (4236, 4269), False, 'from ultra_simple_config import load_config\n'), ((1557, 1598), 'agent.utils.vdb.init_vdb', 'init_vdb', (['cfg', 'collection_name', 'embedding'], {}), '(cfg, collection_name, embedding)\n', (1565, 1598), False, 'from agent.utils.vdb import init_vdb\n'), ((2097, 2148), 'langchain.text_splitter.NLTKTextSplitter', 'NLTKTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(100)'}), '(chunk_size=500, chunk_overlap=100)\n', (2113, 2148), False, 'from langchain.text_splitter import NLTKTextSplitter\n'), ((2163, 2225), 'langchain_community.document_loaders.DirectoryLoader', 'DirectoryLoader', (['dir'], {'glob': '"""*.pdf"""', 'loader_cls': 'PyPDFium2Loader'}), "(dir, glob='*.pdf', loader_cls=PyPDFium2Loader)\n", (2178, 2225), False, 'from langchain_community.document_loaders import DirectoryLoader, PyPDFium2Loader\n'), ((2476, 2515), 'loguru.logger.info', 'logger.info', (['"""SUCCESS: Texts embedded."""'], {}), "('SUCCESS: Texts embedded.')\n", (2487, 2515), False, 'from loguru import logger\n'), ((3259, 3299), 'loguru.logger.info', 'logger.info', (['"""SUCCESS: Documents found."""'], {}), "('SUCCESS: Documents found.')\n", (3270, 3299), False, 'from loguru import logger\n'), ((3679, 3764), 'agent.utils.utility.generate_prompt', 'generate_prompt', ([], {'prompt_name': '"""openai-summarization.j2"""', 'text': 'text', 'language': '"""de"""'}), "(prompt_name='openai-summarization.j2', text=text, language='de'\n )\n", (3694, 3764), False, 'from agent.utils.utility import generate_prompt\n'), ((3803, 4122), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'cfg.openai.model', 'prompt': 'prompt', 'temperature': 'cfg.openai.temperature', 'max_tokens': 'cfg.openai.max_tokens', 'top_p': 'cfg.openai.top_p', 'frequency_penalty': 'cfg.openai.frequency_penalty', 'presence_penalty': 'cfg.openai.presence_penalty', 'best_of': 'cfg.openai.best_of', 'stop': 'cfg.openai.stop'}), '(engine=cfg.openai.model, prompt=prompt,\n temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens,\n top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty,\n presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.\n best_of, stop=cfg.openai.stop)\n', (3827, 4122), False, 'import openai\n'), ((4683, 4781), 'agent.utils.utility.generate_prompt', 'generate_prompt', ([], {'prompt_name': '"""openai-summarization.j2"""', 'text': 'text', 'query': 'query', 'language': '"""de"""'}), "(prompt_name='openai-summarization.j2', text=text, query=\n query, language='de')\n", (4698, 4781), False, 'from agent.utils.utility import generate_prompt\n'), ((4820, 5139), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'cfg.openai.model', 'prompt': 'prompt', 'temperature': 'cfg.openai.temperature', 'max_tokens': 'cfg.openai.max_tokens', 'top_p': 
'cfg.openai.top_p', 'frequency_penalty': 'cfg.openai.frequency_penalty', 'presence_penalty': 'cfg.openai.presence_penalty', 'best_of': 'cfg.openai.best_of', 'stop': 'cfg.openai.stop'}), '(engine=cfg.openai.model, prompt=prompt,\n temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens,\n top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty,\n presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.\n best_of, stop=cfg.openai.stop)\n', (4844, 5139), False, 'import openai\n'), ((5804, 5941), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'model', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'stop_sequences': 'stop_sequences'}), '(engine=model, prompt=prompt, temperature=\n temperature, max_tokens=max_tokens, stop_sequences=stop_sequences)\n', (5828, 5941), False, 'import openai\n'), ((7267, 7327), 'agent.utils.utility.generate_prompt', 'generate_prompt', (['"""aleph_alpha_qa.j2"""'], {'text': 'text', 'query': 'query'}), "('aleph_alpha_qa.j2', text=text, query=query)\n", (7282, 7327), False, 'from agent.utils.utility import generate_prompt\n'), ((8017, 8044), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (8026, 8044), False, 'import os\n'), ((1188, 1311), 'langchain_community.embeddings.AzureOpenAIEmbeddings', 'AzureOpenAIEmbeddings', ([], {'deployment': 'cfg.openai.deployment', 'openai_api_version': '"""2023-05-15"""', 'openai_api_key': 'open_ai_token'}), "(deployment=cfg.openai.deployment, openai_api_version=\n '2023-05-15', openai_api_key=open_ai_token)\n", (1209, 1311), False, 'from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings\n'), ((1353, 1428), 'langchain_community.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'cfg.openai.deployment', 'openai_api_key': 'open_ai_token'}), '(model=cfg.openai.deployment, openai_api_key=open_ai_token)\n', (1369, 1428), False, 'from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings\n'), ((7566, 7610), 'loguru.logger.info', 'logger.info', (['"""Prompt too long. Summarizing."""'], {}), "('Prompt too long. Summarizing.')\n", (7577, 7610), False, 'from loguru import logger\n'), ((7761, 7822), 'agent.utils.utility.generate_prompt', 'generate_prompt', (['"""openai-qa.j2"""'], {'text': 'short_text', 'query': 'query'}), "('openai-qa.j2', text=short_text, query=query)\n", (7776, 7822), False, 'from agent.utils.utility import generate_prompt\n')] |
import sys
from langchain.chains.summarize import load_summarize_chain
from langchain import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter()
# get transcript file key from args
file_key = sys.argv[1]
# get transcript text
text = open(file_key, "r").read()
llm = OpenAI(temperature=0)
texts = text_splitter.split_text(text)
from langchain.docstore.document import Document
docs = [Document(page_content=t) for t in texts]
chain = load_summarize_chain(llm, chain_type="map_reduce")
output = chain.run(docs)
print(output)
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.docstore.document.Document",
"langchain.chains.summarize.load_summarize_chain",
"langchain.OpenAI"
] | [((186, 218), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {}), '()\n', (216, 218), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((344, 365), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (350, 365), False, 'from langchain import OpenAI\n'), ((514, 564), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""map_reduce"""'}), "(llm, chain_type='map_reduce')\n", (534, 564), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((464, 488), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (472, 488), False, 'from langchain.docstore.document import Document\n')] |
from base64 import b64decode
import os
import textwrap
from math import ceil
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
from fastapi import FastAPI
from typing import Optional
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.llms import HuggingFaceHub
from langchain_openai.llms import OpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
# from prompts import full_summary_template, snip_summary_template_with_context, snip_summary_template
class SummarizeSnip(BaseModel):
title: str
    summary: Optional[str] = None
transcript: str
encoded: bool = True
dev = os.getenv("FASTAPI_ENV") == "development"
# headers = {"Authorization": "Bearer " + FLOWISE_API_KEY}
app = FastAPI(docs_url="/api/llm/docs", redoc_url="/api/llm/redoc", openapi_url="/api/llm/openapi.json")
# CORS configuration
origins = [
"https://www.youtube.com",
"http://localhost:3000",
"https://www.sniptube.tech",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["GET", "POST", "OPTIONS"],
allow_headers=["content-type"]
)
@app.get("/api/llm/healthchecker")
def healthchecker():
return {"status": "success", "message": "Integrated FastAPI Framework with Next.js and chrome extension successfully!"}
@app.post("/api/llm/summarize/snip")
async def summarizeSnip(item: SummarizeSnip):
# set up model
# llm = GPT4All(model=model_path, temp=0.1)
# llm = Cohere(model="summarize-xlarge", cohere_api_key=COHERE_API_KEY, temperature=0.1)
# OpenAI(temperature=0.6) if not dev else
llm = OpenAI(temperature=0.6) if not dev else HuggingFaceHub(repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.6, 'max_new_tokens': 1000 })
if item.encoded:
# decode from base64
title = b64decode(item.title).decode("utf-8")
text = b64decode(item.transcript).decode("utf-8")
summary = b64decode(item.summary).decode("utf-8") if item.summary else None
else:
title = item.title
text = item.transcript
summary = item.summary if item.summary else None
PROMPT_SNIP_SUMMARY = PromptTemplate(template=snip_summary_template.format(title=title, text='{text}'), input_variables=["text"])
# TODO: refine chain? https://python.langchain.com/docs/modules/chains/popular/summarize#the-refine-chain
chain = load_summarize_chain(llm, chain_type="stuff", verbose=True, prompt=PROMPT_SNIP_SUMMARY)
# TODO: are metadata necessary?
text_document = [Document(page_content=text, metadata={"title": title, "summary": summary, "transcript": text})]
summary = chain.invoke({'input_documents': text_document}, return_only_outputs=True)['output_text'].strip()
wrapped_summary = textwrap.fill(summary, width=100)
return {"summary": wrapped_summary}
# ---------------------------PROMPTS----------------------------------------------------------------------------------------------------
snip_summary_template = """You are a youtube section summarizer. Which means you will be given the transcript of a section of a youtube video and you need to summarize that transcript of the youtube video into a concise sentence. The sentence should only describe the main points of the given transcript
TRANSCRIPT OF SECTION OF VIDEO TO CONCISELY SUMMARIZE:
{text}
CONCISE SUMMARIZED SENTENCE FROM TRANSCRIPT(only write one sentence):
"""
| [
"langchain_community.llms.HuggingFaceHub",
"langchain.chains.summarize.load_summarize_chain",
"langchain.docstore.document.Document",
"langchain_openai.llms.OpenAI"
] | [((109, 122), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (120, 122), False, 'from dotenv import load_dotenv\n'), ((930, 1033), 'fastapi.FastAPI', 'FastAPI', ([], {'docs_url': '"""/api/llm/docs"""', 'redoc_url': '"""/api/llm/redoc"""', 'openapi_url': '"""/api/llm/openapi.json"""'}), "(docs_url='/api/llm/docs', redoc_url='/api/llm/redoc', openapi_url=\n '/api/llm/openapi.json')\n", (937, 1033), False, 'from fastapi import FastAPI\n'), ((822, 846), 'os.getenv', 'os.getenv', (['"""FASTAPI_ENV"""'], {}), "('FASTAPI_ENV')\n", (831, 846), False, 'import os\n'), ((2603, 2695), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""stuff"""', 'verbose': '(True)', 'prompt': 'PROMPT_SNIP_SUMMARY'}), "(llm, chain_type='stuff', verbose=True, prompt=\n PROMPT_SNIP_SUMMARY)\n", (2623, 2695), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((2987, 3020), 'textwrap.fill', 'textwrap.fill', (['summary'], {'width': '(100)'}), '(summary, width=100)\n', (3000, 3020), False, 'import textwrap\n'), ((1819, 1842), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.6)'}), '(temperature=0.6)\n', (1825, 1842), False, 'from langchain_openai.llms import OpenAI\n'), ((1859, 1974), 'langchain_community.llms.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': '"""tiiuae/falcon-7b-instruct"""', 'model_kwargs': "{'temperature': 0.6, 'max_new_tokens': 1000}"}), "(repo_id='tiiuae/falcon-7b-instruct', model_kwargs={\n 'temperature': 0.6, 'max_new_tokens': 1000})\n", (1873, 1974), False, 'from langchain_community.llms import HuggingFaceHub\n'), ((2748, 2846), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': "{'title': title, 'summary': summary, 'transcript': text}"}), "(page_content=text, metadata={'title': title, 'summary': summary,\n 'transcript': text})\n", (2756, 2846), False, 'from langchain.docstore.document import Document\n'), ((2042, 2063), 'base64.b64decode', 'b64decode', (['item.title'], {}), '(item.title)\n', (2051, 2063), False, 'from base64 import b64decode\n'), ((2095, 2121), 'base64.b64decode', 'b64decode', (['item.transcript'], {}), '(item.transcript)\n', (2104, 2121), False, 'from base64 import b64decode\n'), ((2156, 2179), 'base64.b64decode', 'b64decode', (['item.summary'], {}), '(item.summary)\n', (2165, 2179), False, 'from base64 import b64decode\n')] |
from typing import Any, Dict, List, Union
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import BaseMessage, get_buffer_string
class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory inside a limited size window."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
memory_key: str = "history" #: :meta private:
k: int = 5
"""Number of messages to store in buffer."""
@property
def buffer(self) -> Union[str, List[BaseMessage]]:
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property
def buffer_as_str(self) -> str:
"""Exposes the buffer as a string in case return_messages is True."""
messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
return get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
@property
def buffer_as_messages(self) -> List[BaseMessage]:
"""Exposes the buffer as a list of messages in case return_messages is False."""
return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
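# Usage sketch (illustrative): with k=1 only the most recent Human/AI exchange is kept.
#   memory = ConversationBufferWindowMemory(k=1)
#   memory.save_context({"input": "hi"}, {"output": "hello"})
#   memory.save_context({"input": "how are you?"}, {"output": "fine, thanks"})
#   memory.load_memory_variables({})
#   # -> {"history": "Human: how are you?\nAI: fine, thanks"}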
| [
"langchain.schema.messages.get_buffer_string"
] | [((899, 989), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (916, 989), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n')] |
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, SerpAPIWrapper, LLMChain
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish
import re
from langchain.utilities import BashProcess
from langchain.tools.human.tool import HumanInputRun
from langchain_tools.cwtool import CloudWatchInsightQuery
import sys
# Set up a prompt template
class CustomPromptTemplate(StringPromptTemplate):
# The template to use
template: str
# The list of tools available
tools: List[Tool]
def format(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
return self.template.format(**kwargs)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
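# Example (hypothetical) LLM output this parser handles:
#   "Thought: I should look this up\nAction: Search\nAction Input: latest OpenSSL CVE"
# which yields AgentAction(tool="Search", tool_input="latest OpenSSL CVE", log=<full output>).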
def agent_run(user_input) -> None:
# Define which tools the agent can use to answer user queries
search = SerpAPIWrapper()
bash = BashProcess()
human = HumanInputRun()
cloudwatch = CloudWatchInsightQuery()
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
),
Tool(
name = "human",
func=human.run,
description="useful for when you need to ask the human for input"
),
Tool(
name = "cloudwatch",
func=cloudwatch.run,
description="useful for when you need run an AWS cloudwatch insight query or search logs in AWS"
),
Tool(
name = "terminal",
func=bash.run,
description="useful for when you need to run commands in a terminal"
)
]
# Set up the base template
template = """You are a cyber Security professional. You will take steps to achieve the requested task. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Question: {input}
{agent_scratchpad}"""
prompt = CustomPromptTemplate(template=template,tools=tools,input_variables=["input", "intermediate_steps"])
output_parser = CustomOutputParser()
llm = OpenAI(temperature=0)
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_executor.run(user_input)
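# Example invocation (illustrative):
#   agent_run("Query CloudWatch for failed SSH logins in the last 24 hours and summarize them")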
| [
"langchain.tools.human.tool.HumanInputRun",
"langchain_tools.cwtool.CloudWatchInsightQuery",
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.agents.LLMSingleActionAgent",
"langchain.utilities.BashProcess",
"langchain.SerpAPIWrapper",
"langchain.LLMChain",
"langchain.OpenAI",
"langchain.agents.Tool"
] | [((2630, 2646), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (2644, 2646), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((2658, 2671), 'langchain.utilities.BashProcess', 'BashProcess', ([], {}), '()\n', (2669, 2671), False, 'from langchain.utilities import BashProcess\n'), ((2684, 2699), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2697, 2699), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2717, 2741), 'langchain_tools.cwtool.CloudWatchInsightQuery', 'CloudWatchInsightQuery', ([], {}), '()\n', (2739, 2741), False, 'from langchain_tools.cwtool import CloudWatchInsightQuery\n'), ((4330, 4351), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (4336, 4351), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((4368, 4400), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (4376, 4400), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((4461, 4587), 'langchain.agents.LLMSingleActionAgent', 'LLMSingleActionAgent', ([], {'llm_chain': 'llm_chain', 'output_parser': 'output_parser', 'stop': "['\\nObservation:']", 'allowed_tools': 'tool_names'}), "(llm_chain=llm_chain, output_parser=output_parser, stop\n =['\\nObservation:'], allowed_tools=tool_names)\n", (4481, 4587), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((4645, 4719), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': '(True)'}), '(agent=agent, tools=tools, verbose=True)\n', (4679, 4719), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((2150, 2189), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (2159, 2189), False, 'import re\n'), ((2765, 2887), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to answer questions about current events"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to answer questions about current events')\n", (2769, 2887), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((2940, 3046), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""human"""', 'func': 'human.run', 'description': '"""useful for when you need to ask the human for input"""'}), "(name='human', func=human.run, description=\n 'useful for when you need to ask the human for input')\n", (2944, 3046), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((3099, 3251), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""cloudwatch"""', 'func': 'cloudwatch.run', 'description': '"""useful for when you need run an AWS cloudwatch insight query or search logs in AWS"""'}), "(name='cloudwatch', func=cloudwatch.run, description=\n 'useful for when you need run an AWS cloudwatch insight query or search logs in AWS'\n )\n", (3103, 3251), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((3299, 3410), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""terminal"""', 'func': 'bash.run', 'description': '"""useful for when you need to run commands in a terminal"""'}), "(name='terminal', func=bash.run, description=\n 'useful 
for when you need to run commands in a terminal')\n", (3303, 3410), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n')] |
# coding: utf-8
import os
import gradio as gr
import re
import uuid
from PIL import Image, ImageDraw, ImageOps, ImageFont
import numpy as np
import argparse
import inspect
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from gpt4tools.llm import LlamaLangChain
from gpt4tools.tools import *
GPT4TOOLS_PREFIX = """GPT4Tools can handle various text and visual tasks, such as answering questions and providing in-depth explanations and discussions. It generates human-like text and uses tools to indirectly understand images. When referring to images, GPT4Tools follows strict file name rules. To complete visual tasks, GPT4Tools uses tools and stays loyal to observation outputs. Users can provide new images to GPT4Tools with a description, but tools must be used for subsequent tasks.
TOOLS:
------
GPT4Tools has access to the following tools:"""
GPT4TOOLS_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
GPT4TOOLS_SUFFIX = """Follow file name rules and do not fake non-existent file names. Remember to provide the image file name loyally from the last tool observation.
Previous conversation:
{chat_history}
New input: {input}
GPT4Tools needs to use tools to observe images, not directly imagine them. Thoughts and observations in the conversation are only visible to GPT4Tools. When answering human questions, repeat important information. Let's think step by step.
{agent_scratchpad}"""
os.makedirs('image', exist_ok=True)
def cut_dialogue_history(history_memory, keep_last_n_paragraphs=1):
if history_memory is None or len(history_memory) == 0:
return history_memory
paragraphs = history_memory.split('Human:')
if len(paragraphs) <= keep_last_n_paragraphs:
return history_memory
    return 'Human:' + 'Human:'.join(paragraphs[-keep_last_n_paragraphs:])
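# e.g. (illustrative): with keep_last_n_paragraphs=1 only the most recent Human turn survives:
#   cut_dialogue_history("Human: hi\nAI: hello\nHuman: describe the image\nAI: ok", 1)
#   -> "Human: describe the image\nAI: ok"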
class ConversationBot:
def __init__(self, load_dict, llm_kwargs):
# load_dict = {'VisualQuestionAnswering':'cuda:0', 'ImageCaptioning':'cuda:1',...}
print(f"Initializing GPT4Tools, load_dict={load_dict}")
if 'ImageCaptioning' not in load_dict:
raise ValueError("You have to load ImageCaptioning as a basic function for GPT4Tools")
self.models = {}
# Load Basic Foundation Models
for class_name, device in load_dict.items():
self.models[class_name] = globals()[class_name](device=device)
# Load Template Foundation Models
for class_name, module in globals().items():
if getattr(module, 'template_model', False):
template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
loaded_names = set([type(e).__name__ for e in self.models.values()])
if template_required_names.issubset(loaded_names):
self.models[class_name] = globals()[class_name](
**{name: self.models[name] for name in template_required_names})
print(f"All the Available Functions: {self.models}")
self.tools = []
for instance in self.models.values():
for e in dir(instance):
if e.startswith('inference'):
func = getattr(instance, e)
self.tools.append(Tool(name=func.name, description=func.description, func=func))
self.llm = LlamaLangChain(model_kwargs=llm_kwargs)
self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
def init_agent(self, lang):
self.memory.clear() #clear previous history
if lang=='English':
PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = GPT4TOOLS_PREFIX, GPT4TOOLS_FORMAT_INSTRUCTIONS, GPT4TOOLS_SUFFIX
place = "Enter text and press enter, or upload an image"
label_clear = "Clear"
else:
raise NotImplementedError(f'{lang} is not supported yet')
self.agent = initialize_agent(
self.tools,
self.llm,
agent="conversational-react-description",
verbose=True,
memory=self.memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS,
'suffix': SUFFIX}, )
return gr.update(visible = True), gr.update(visible = False), gr.update(placeholder=place), gr.update(value=label_clear)
def run_text(self, text, state, temperature, top_p, max_new_tokens, keep_last_n_paragraphs):
self.llm.set_llm_params(temperature=temperature,
top_p=top_p,
max_new_tokens=max_new_tokens)
self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_paragraphs)
res = self.agent({"input": text.strip()})
res['output'] = res['output'].replace("\\", "/")
response = re.sub('(image/[-\w]*.png)', lambda m: f'})*{m.group(0)}*', res['output'])
state = state + [(text, response)]
print(f"\nProcessed run_text, Input text: {text}\nCurrent state: {state}\n"
f"Current Memory: {self.agent.memory.buffer}")
image_filenames = re.findall('image/.*.png', str(self.agent.memory.buffer))
image_filename = image_filenames[-1] if len(image_filenames) > 0 else ''
return state, state, f'{image_filename} '
def run_image(self, image, state, txt, lang='English'):
if image is None:
return state, state, txt
image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
print("======>Auto Resize Image...")
img = image
width, height = img.size
ratio = min(512 / width, 512 / height)
width_new, height_new = (round(width * ratio), round(height * ratio))
width_new = int(np.round(width_new / 64.0)) * 64
height_new = int(np.round(height_new / 64.0)) * 64
img = img.resize((width_new, height_new))
img = img.convert('RGB')
img.save(image_filename, "PNG")
print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
description = self.models['ImageCaptioning'].inference(image_filename)
if lang == 'English':
Human_prompt = f'\nHuman: Provide an image named {image_filename}. The description is: {description}. Understand the image using tools.\n'
AI_prompt = "Received."
else:
raise NotImplementedError(f'{lang} is not supported yet')
self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
state = state + [(f"*{image_filename}*", AI_prompt)]
print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n"
f"Current Memory: {self.agent.memory.buffer}")
return state, state, f'{image_filename} {txt}'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--base_model', type=str, required=True, help='folder path to the vicuna with tokenizer')
parser.add_argument('--lora_model', type=str, required=True, help='folder path to the lora model')
parser.add_argument('--load', type=str, default='ImageCaptioning_cuda:0,Text2Image_cuda:0')
parser.add_argument('--llm_device', type=str, default='cpu', help='device to run the llm model')
parser.add_argument('--temperature', type=float, default=0.1, help='temperature for the llm model')
parser.add_argument('--max_new_tokens', type=int, default=512, help='max number of new tokens to generate')
parser.add_argument('--top_p', type=float, default=0.75, help='top_p for the llm model')
parser.add_argument('--top_k', type=int, default=40, help='top_k for the llm model')
parser.add_argument('--num_beams', type=int, default=1, help='num_beams for the llm model')
parser.add_argument('--keep_last_n_paragraphs', type=int, default=1, help='keep last n paragraphs in the memory')
parser.add_argument('--cache-dir', type=str, default=None, help="cache path to save model")
parser.add_argument('--server-name', type=str, default='0.0.0.0', help="gradio sever name")
parser.add_argument('--server-port', type=int, default=8888, help="gradio server port")
parser.add_argument('--share', action="store_true")
args = parser.parse_args()
load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.split(',')}
llm_kwargs = {'base_model': args.base_model,
'lora_model': args.lora_model,
'device': args.llm_device,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'num_beams': args.num_beams,
'cache_dir': args.cache_dir,}
bot = ConversationBot(load_dict=load_dict, llm_kwargs=llm_kwargs)
examples = [
['asserts/images/example-1.jpg','Make the image look like a cartoon.'],
['asserts/images/example-2.jpg','Segment the tie in the image.'],
['asserts/images/example-3.jpg','Generate a man watching a sea based on the pose of the woman.'],
['asserts/images/example-4.jpg','Tell me a story about this image.'],
]
with gr.Blocks() as demo:
with gr.Row():
with gr.Column(scale=0.3):
with gr.Row():
image = gr.Image(type="pil", label="input image")
with gr.Row():
txt = gr.Textbox(lines=7, show_label=False, elem_id="textbox",
placeholder="Enter text and press submit, or upload an image").style(container=False)
with gr.Row():
submit = gr.Button("Submit")
with gr.Row():
clear = gr.Button("Clear")
with gr.Row():
keep_last_n_paragraphs = gr.Slider(
minimum=0,
maximum=3,
value=args.keep_last_n_paragraphs,
step=1,
interactive=True,
label="Remember Last N Paragraphs")
max_new_token = gr.Slider(
minimum=128,
maximum=1024,
value=args.max_new_tokens,
step=64,
interactive=True,
label="Max New Tokens")
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=args.temperature,
step=0.1,
interactive=True,
label="Temperature")
top_p = gr.Slider(
minimum=0.0,
maximum=1.0,
value=args.top_p,
step=0.1,
interactive=True,
label="Top P")
with gr.Column(scale=0.7):
chatbot = gr.Chatbot(elem_id="chatbot", label="🦙 GPT4Tools").style(height=690)
state = gr.State([])
        # TODO: support more languages
bot.init_agent('English')
        txt.submit(bot.run_text, [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs], [chatbot, state, txt])
txt.submit(lambda: "", None, txt)
# submit.click(bot.run_image, [image, state, txt], [chatbot, state, txt]).then(
# bot.run_text, [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs], [chatbot, state]).then(
# lambda: "", None, txt).then(
# lambda: None, None, image)
submit.click(bot.run_image, [image, state, txt], [chatbot, state, txt]).then(
bot.run_text, [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs], [chatbot, state, txt]).then(
lambda: None, None, image)
clear.click(bot.memory.clear)
clear.click(lambda: [], None, chatbot)
clear.click(lambda: [], None, state)
with gr.Row():
gr.Examples(
examples=examples,
inputs=[image, txt],
)
demo.launch(server_name=args.server_name, server_port=args.server_port, enable_queue=True, share=args.share) | [
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent",
"langchain.agents.tools.Tool"
] | [((1924, 1959), 'os.makedirs', 'os.makedirs', (['"""image"""'], {'exist_ok': '(True)'}), "('image', exist_ok=True)\n", (1935, 1959), False, 'import os\n'), ((7453, 7478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7476, 7478), False, 'import argparse\n'), ((3840, 3879), 'gpt4tools.llm.LlamaLangChain', 'LlamaLangChain', ([], {'model_kwargs': 'llm_kwargs'}), '(model_kwargs=llm_kwargs)\n', (3854, 3879), False, 'from gpt4tools.llm import LlamaLangChain\n'), ((3903, 3975), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (3927, 3975), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((4413, 4667), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX\n }"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix': PREFIX,\n 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX})\n", (4429, 4667), False, 'from langchain.agents.initialize import initialize_agent\n'), ((9840, 9851), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (9849, 9851), True, 'import gradio as gr\n'), ((4783, 4806), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (4792, 4806), True, 'import gradio as gr\n'), ((4810, 4834), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (4819, 4834), True, 'import gradio as gr\n'), ((4838, 4866), 'gradio.update', 'gr.update', ([], {'placeholder': 'place'}), '(placeholder=place)\n', (4847, 4866), True, 'import gradio as gr\n'), ((4868, 4896), 'gradio.update', 'gr.update', ([], {'value': 'label_clear'}), '(value=label_clear)\n', (4877, 4896), True, 'import gradio as gr\n'), ((9874, 9882), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9880, 9882), True, 'import gradio as gr\n'), ((12740, 12748), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (12746, 12748), True, 'import gradio as gr\n'), ((12762, 12813), 'gradio.Examples', 'gr.Examples', ([], {'examples': 'examples', 'inputs': '[image, txt]'}), '(examples=examples, inputs=[image, txt])\n', (12773, 12813), True, 'import gradio as gr\n'), ((6340, 6366), 'numpy.round', 'np.round', (['(width_new / 64.0)'], {}), '(width_new / 64.0)\n', (6348, 6366), True, 'import numpy as np\n'), ((6398, 6425), 'numpy.round', 'np.round', (['(height_new / 64.0)'], {}), '(height_new / 64.0)\n', (6406, 6425), True, 'import numpy as np\n'), ((9901, 9921), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)'}), '(scale=0.3)\n', (9910, 9921), True, 'import gradio as gr\n'), ((11640, 11660), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (11649, 11660), True, 'import gradio as gr\n'), ((11781, 11793), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (11789, 11793), True, 'import gradio as gr\n'), ((9944, 9952), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9950, 9952), True, 'import gradio as gr\n'), ((9982, 10023), 'gradio.Image', 'gr.Image', ([], {'type': '"""pil"""', 'label': '"""input image"""'}), "(type='pil', label='input image')\n", (9990, 10023), True, 'import 
gradio as gr\n'), ((10045, 10053), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10051, 10053), True, 'import gradio as gr\n'), ((10282, 10290), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10288, 10290), True, 'import gradio as gr\n'), ((10321, 10340), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (10330, 10340), True, 'import gradio as gr\n'), ((10362, 10370), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10368, 10370), True, 'import gradio as gr\n'), ((10400, 10418), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (10409, 10418), True, 'import gradio as gr\n'), ((10440, 10448), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10446, 10448), True, 'import gradio as gr\n'), ((10495, 10627), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0)', 'maximum': '(3)', 'value': 'args.keep_last_n_paragraphs', 'step': '(1)', 'interactive': '(True)', 'label': '"""Remember Last N Paragraphs"""'}), "(minimum=0, maximum=3, value=args.keep_last_n_paragraphs, step=1,\n interactive=True, label='Remember Last N Paragraphs')\n", (10504, 10627), True, 'import gradio as gr\n'), ((10805, 10923), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(128)', 'maximum': '(1024)', 'value': 'args.max_new_tokens', 'step': '(64)', 'interactive': '(True)', 'label': '"""Max New Tokens"""'}), "(minimum=128, maximum=1024, value=args.max_new_tokens, step=64,\n interactive=True, label='Max New Tokens')\n", (10814, 10923), True, 'import gradio as gr\n'), ((11099, 11211), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': 'args.temperature', 'step': '(0.1)', 'interactive': '(True)', 'label': '"""Temperature"""'}), "(minimum=0.0, maximum=1.0, value=args.temperature, step=0.1,\n interactive=True, label='Temperature')\n", (11108, 11211), True, 'import gradio as gr\n'), ((11381, 11482), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': 'args.top_p', 'step': '(0.1)', 'interactive': '(True)', 'label': '"""Top P"""'}), "(minimum=0.0, maximum=1.0, value=args.top_p, step=0.1, interactive\n =True, label='Top P')\n", (11390, 11482), True, 'import gradio as gr\n'), ((3758, 3819), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'func.name', 'description': 'func.description', 'func': 'func'}), '(name=func.name, description=func.description, func=func)\n', (3762, 3819), False, 'from langchain.agents.tools import Tool\n'), ((11688, 11738), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""🦙 GPT4Tools"""'}), "(elem_id='chatbot', label='🦙 GPT4Tools')\n", (11698, 11738), True, 'import gradio as gr\n'), ((6068, 6080), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6078, 6080), False, 'import uuid\n'), ((10081, 10205), 'gradio.Textbox', 'gr.Textbox', ([], {'lines': '(7)', 'show_label': '(False)', 'elem_id': '"""textbox"""', 'placeholder': '"""Enter text and press submit, or upload an image"""'}), "(lines=7, show_label=False, elem_id='textbox', placeholder=\n 'Enter text and press submit, or upload an image')\n", (10091, 10205), True, 'import gradio as gr\n'), ((3072, 3106), 'inspect.signature', 'inspect.signature', (['module.__init__'], {}), '(module.__init__)\n', (3089, 3106), False, 'import inspect\n')] |
import json
from pydantic import BaseModel, Field
from langchain.llms.base import BaseLLM
from typing import Any, List, Optional
from langchain import LLMChain
from llm.generate_task_plan.prompt import get_template
from llm.list_output_parser import LLMListOutputParser
class Task(BaseModel):
"""Task model."""
id: int = Field(..., description="Task ID")
description: str = Field(..., description="Task description")
is_done: bool = Field(False, description="Task done or not")
result: str = Field("", description="The result of the task")
class TaskManeger(BaseModel):
"""Task manager model."""
tasks: List[Task] = Field([], description="The list of tasks")
current_task_id: int = Field(1, description="The last task id")
llm: BaseLLM = Field(..., description="llm class for the agent")
def generate_task_plan(self, name: str, role: str, goal: str):
"""Generate a task plan for the agent."""
        prompt = get_template()
        llm_chain = LLMChain(prompt=prompt, llm=self.llm)
try:
result = llm_chain.predict(
name=name,
role=role,
goal=goal
)
except Exception as e:
raise Exception(f"Error: {e}")
# Parse and validate the result
try:
result_list = LLMListOutputParser.parse(result, separeted_string="\t")
except Exception as e:
raise Exception("Error: " + str(e))
# Add tasks with a serial number
for i, e in enumerate(result_list, start=1):
id = int(i)
description = e
self.tasks.append(Task(id=id, description=description))
    def get_task_by_id(self, id: int) -> Optional[Task]:
"""Get a task by Task id."""
for task in self.tasks:
if task.id == id:
return task
return None
    def get_current_task(self) -> Optional[Task]:
"""Get the current task agent is working on."""
return self.get_task_by_id(self.current_task_id)
    def get_current_task_string(self) -> Optional[str]:
"""Get the current task agent is working on as a string."""
task = self.get_current_task()
if task is None:
return None
else:
return self._task_to_string(task)
def complete_task(self, id: int, result: str) -> None:
"""Complete a task by Task id."""
# Complete the task specified by ID
self.tasks[id - 1].is_done = True
self.tasks[id - 1].result = result
self.current_task_id += 1
def complete_current_task(self, result: str) -> None:
"""Complete the current task agent is working on."""
self.complete_task(self.current_task_id, result=result)
def _task_to_string(self, task: Task) -> str:
"""Convert a task to a string."""
return f"{task.id}: {task.description}"
def get_incomplete_tasks(self) -> List[Task]:
"""Get the list of incomplete tasks."""
return [task for task in self.tasks if not task.is_done]
def get_incomplete_tasks_string(self) -> str:
"""Get the list of incomplete tasks as a string."""
result = ""
for task in self.get_incomplete_tasks():
result += self._task_to_string(task) + "\n"
return result
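# Example usage (illustrative sketch only, not part of the original module; assumes an
# OpenAI-compatible LLM object and the prompt/parser helpers imported above):
#
#     from langchain.llms import OpenAI
#     manager = TaskManeger(llm=OpenAI(temperature=0))
#     manager.generate_task_plan(name="Assistant", role="researcher", goal="Summarize recent LLM papers")
#     while manager.get_current_task() is not None:
#         print("Working on:", manager.get_current_task_string())
#         manager.complete_current_task(result="done")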
| [
"langchain.LLMChain"
] | [((359, 392), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task ID"""'}), "(..., description='Task ID')\n", (364, 392), False, 'from pydantic import BaseModel, Field\n'), ((416, 458), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task description"""'}), "(..., description='Task description')\n", (421, 458), False, 'from pydantic import BaseModel, Field\n'), ((479, 523), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""Task done or not"""'}), "(False, description='Task done or not')\n", (484, 523), False, 'from pydantic import BaseModel, Field\n'), ((542, 589), 'pydantic.Field', 'Field', (['""""""'], {'description': '"""The result of the task"""'}), "('', description='The result of the task')\n", (547, 589), False, 'from pydantic import BaseModel, Field\n'), ((676, 718), 'pydantic.Field', 'Field', (['[]'], {'description': '"""The list of tasks"""'}), "([], description='The list of tasks')\n", (681, 718), False, 'from pydantic import BaseModel, Field\n'), ((746, 786), 'pydantic.Field', 'Field', (['(1)'], {'description': '"""The last task id"""'}), "(1, description='The last task id')\n", (751, 786), False, 'from pydantic import BaseModel, Field\n'), ((806, 855), 'pydantic.Field', 'Field', (['...'], {'description': '"""llm class for the agent"""'}), "(..., description='llm class for the agent')\n", (811, 855), False, 'from pydantic import BaseModel, Field\n'), ((991, 1005), 'llm.generate_task_plan.prompt.get_template', 'get_template', ([], {}), '()\n', (1003, 1005), False, 'from llm.generate_task_plan.prompt import get_template\n'), ((1026, 1063), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'propmt', 'llm': 'self.llm'}), '(prompt=propmt, llm=self.llm)\n', (1034, 1063), False, 'from langchain import LLMChain\n'), ((1366, 1422), 'llm.list_output_parser.LLMListOutputParser.parse', 'LLMListOutputParser.parse', (['result'], {'separeted_string': '"""\t"""'}), "(result, separeted_string='\\t')\n", (1391, 1422), False, 'from llm.list_output_parser import LLMListOutputParser\n')] |
# Ingest Documents into a Zep Collection
import os
from dotenv import find_dotenv, load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from zep_python import ZepClient
from zep_python.langchain.vectorstore import ZepVectorStore
load_dotenv(dotenv_path=find_dotenv())
SOURCE = "https://en.wikipedia.org/wiki/Leonard_Bernstein" # noqa: E501
ZEP_API_URL = os.environ.get(
"ZEP_API_URL"
) # only required if you're using Zep Open Source
ZEP_API_KEY = os.environ.get("ZEP_API_KEY") # Required for Zep Cloud
if ZEP_API_KEY is None:
raise ValueError(
"ZEP_API_KEY is required for Zep Cloud. "
"Remove this check if using Zep Open Source."
)
ZEP_COLLECTION_NAME = os.environ.get("ZEP_COLLECTION_NAME")
if ZEP_COLLECTION_NAME is None:
raise ValueError("ZEP_COLLECTION_NAME is required for ingestion. ")
zep = ZepClient(
api_key=ZEP_API_KEY,
api_url=ZEP_API_URL, # only required if you're using Zep Open Source
)
# Load
loader = WebBaseLoader(SOURCE)
data = loader.load()
print(f"Loaded: {len(data)} documents")
# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=200)
all_splits = text_splitter.split_documents(data)
print(f"Adding {len(all_splits)} documents to {ZEP_COLLECTION_NAME}...")
# Add to vectorDB
vectorstore = ZepVectorStore.from_documents(
documents=all_splits,
collection_name=ZEP_COLLECTION_NAME,
zep_client=zep,
)
print(f"Added {len(all_splits)} documents to {ZEP_COLLECTION_NAME}...")
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.document_loaders.WebBaseLoader"
] | [((449, 478), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_URL"""'], {}), "('ZEP_API_URL')\n", (463, 478), False, 'import os\n'), ((549, 578), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_KEY"""'], {}), "('ZEP_API_KEY')\n", (563, 578), False, 'import os\n'), ((784, 821), 'os.environ.get', 'os.environ.get', (['"""ZEP_COLLECTION_NAME"""'], {}), "('ZEP_COLLECTION_NAME')\n", (798, 821), False, 'import os\n'), ((933, 984), 'zep_python.ZepClient', 'ZepClient', ([], {'api_key': 'ZEP_API_KEY', 'api_url': 'ZEP_API_URL'}), '(api_key=ZEP_API_KEY, api_url=ZEP_API_URL)\n', (942, 984), False, 'from zep_python import ZepClient\n'), ((1062, 1083), 'langchain_community.document_loaders.WebBaseLoader', 'WebBaseLoader', (['SOURCE'], {}), '(SOURCE)\n', (1075, 1083), False, 'from langchain_community.document_loaders import WebBaseLoader\n'), ((1171, 1236), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(400)', 'chunk_overlap': '(200)'}), '(chunk_size=400, chunk_overlap=200)\n', (1201, 1236), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1393, 1502), 'zep_python.langchain.vectorstore.ZepVectorStore.from_documents', 'ZepVectorStore.from_documents', ([], {'documents': 'all_splits', 'collection_name': 'ZEP_COLLECTION_NAME', 'zep_client': 'zep'}), '(documents=all_splits, collection_name=\n ZEP_COLLECTION_NAME, zep_client=zep)\n', (1422, 1502), False, 'from zep_python.langchain.vectorstore import ZepVectorStore\n'), ((345, 358), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import find_dotenv, load_dotenv\n')] |
#model_settings.py
import streamlit as st
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext
from llama_index.logger import LlamaLogger
from langchain.chat_models import ChatOpenAI
from langchain import OpenAI
from enum import Enum
class sentenceTransformers(Enum):
OPTION1 = "sentence-transformers/all-MiniLM-L6-v2" #default
OPTION2 = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
OPTION3 = "sentence-transformers/all-mpnet-base-v2"
def get_sentence_transformer_dropdown():
options = [e.value for e in sentenceTransformers]
selected_option = st.selectbox("Sentence transformer:", options)
return selected_option
def get_embed_model(provider='Langchain', model_name=sentenceTransformers.OPTION1.value):
# load in HF embedding model from langchain
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name=model_name)) if provider=='Langchain' else OpenAIEmbedding()
return embed_model
def get_prompt_helper():
# define prompt helper
max_input_size = 4096
num_output = 2048
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
return prompt_helper
def get_llm_predictor():
# define LLM
num_output = 2048
#llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=num_output))
llm_predictor = LLMPredictor(ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=num_output))
return llm_predictor
@st.cache_resource
def get_logger():
llama_logger = LlamaLogger()
return llama_logger
def get_service_context(llm_predictor=get_llm_predictor(),
embed_model=get_embed_model(),
prompt_helper=get_prompt_helper(),
chunk_size_limit=512,
llama_logger=get_logger()):
return ServiceContext.from_defaults(llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
chunk_size_limit=chunk_size_limit,
llama_logger=llama_logger)
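# Example usage (illustrative sketch only, not part of the original module; the index
# construction call is hypothetical and depends on the llama_index version in use):
#
#     model_name = get_sentence_transformer_dropdown()
#     service_context = get_service_context(embed_model=get_embed_model(model_name=model_name))
#     # index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)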
| [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings",
"langchain.chat_models.ChatOpenAI"
] | [((705, 751), 'streamlit.selectbox', 'st.selectbox', (['"""Sentence transformer:"""', 'options'], {}), "('Sentence transformer:', options)\n", (717, 751), True, 'import streamlit as st\n'), ((1220, 1279), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1232, 1279), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1684, 1697), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (1695, 1697), False, 'from llama_index.logger import LlamaLogger\n'), ((2009, 2192), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model', 'prompt_helper': 'prompt_helper', 'chunk_size_limit': 'chunk_size_limit', 'llama_logger': 'llama_logger'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model, prompt_helper=prompt_helper, chunk_size_limit=\n chunk_size_limit, llama_logger=llama_logger)\n', (2037, 2192), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1031, 1048), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1046, 1048), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1522, 1600), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_output'}), "(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=num_output)\n", (1532, 1600), False, 'from langchain.chat_models import ChatOpenAI\n'), ((955, 999), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (976, 999), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n')] |
import os
from dotenv import load_dotenv
import streamlit as st
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
from langchain.callbacks import StreamlitCallbackHandler
from langchain.tools import BaseTool, Tool, tool
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import ChatMessage
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain import PromptTemplate, LLMChain
from langchain.vectorstores import LanceDB
import lancedb
import pandas as pd
from langchain.chains import RetrievalQA
st.set_page_config(page_title="GlobeBotter", page_icon="🎬")
st.header('🎬 Welcome to MovieHarbor, your favourite movie recommender')
load_dotenv()
#os.environ["HUGGINGFACEHUB_API_TOKEN"]
openai_api_key = os.environ['OPENAI_API_KEY']
embeddings = OpenAIEmbeddings()
uri = "data/sample-lancedb"
db = lancedb.connect(uri)
table = db.open_table('movies')
docsearch = LanceDB(connection = table, embedding = embeddings)
# Import the movie dataset
md = pd.read_pickle('movies.pkl')
# Create a sidebar for user input
st.sidebar.title("Movie Recommendation System")
st.sidebar.markdown("Please enter your details and preferences below:")
# Ask the user for age, gender and favourite movie genre
age = st.sidebar.slider("What is your age?", 1, 100, 25)
gender = st.sidebar.radio("What is your gender?", ("Male", "Female", "Other"))
genre = st.sidebar.selectbox("What is your favourite movie genre?", md.explode('genres')["genres"].unique())
# Filter the movies based on the user input
df_filtered = md[md['genres'].apply(lambda x: genre in x)]
template_prefix = """You are a movie recommender system that help users to find movies that match their preferences.
Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}"""
user_info = """This is what we know about the user, and you can use this information to better tune your research:
Age: {age}
Gender: {gender}"""
template_suffix= """Question: {question}
Your response:"""
user_info = user_info.format(age = age, gender = gender)
COMBINED_PROMPT = template_prefix +'\n'+ user_info +'\n'+ template_suffix
print(COMBINED_PROMPT)
#setting up the chain
# wire the user-aware combined prompt into the chain so the age/gender context defined above is actually used
qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff",
        retriever=docsearch.as_retriever(search_kwargs={'data': df_filtered}),
        chain_type_kwargs={"prompt": PromptTemplate(template=COMBINED_PROMPT, input_variables=["context", "question"])},
        return_source_documents=True)
query = st.text_input('Enter your question:', placeholder = 'What action movies do you suggest?')
if query:
result = qa({"query": query})
st.write(result['result'])
| [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.llms.OpenAI",
"langchain.vectorstores.LanceDB"
] | [((924, 983), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""GlobeBotter"""', 'page_icon': '"""🎬"""'}), "(page_title='GlobeBotter', page_icon='🎬')\n", (942, 983), True, 'import streamlit as st\n'), ((984, 1055), 'streamlit.header', 'st.header', (['"""🎬 Welcome to MovieHarbor, your favourite movie recommender"""'], {}), "('🎬 Welcome to MovieHarbor, your favourite movie recommender')\n", (993, 1055), True, 'import streamlit as st\n'), ((1057, 1070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1068, 1070), False, 'from dotenv import load_dotenv\n'), ((1172, 1190), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1188, 1190), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1224, 1244), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1239, 1244), False, 'import lancedb\n'), ((1290, 1337), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (1297, 1337), False, 'from langchain.vectorstores import LanceDB\n'), ((1375, 1403), 'pandas.read_pickle', 'pd.read_pickle', (['"""movies.pkl"""'], {}), "('movies.pkl')\n", (1389, 1403), True, 'import pandas as pd\n'), ((1439, 1486), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Movie Recommendation System"""'], {}), "('Movie Recommendation System')\n", (1455, 1486), True, 'import streamlit as st\n'), ((1487, 1558), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""Please enter your details and preferences below:"""'], {}), "('Please enter your details and preferences below:')\n", (1506, 1558), True, 'import streamlit as st\n'), ((1623, 1673), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""What is your age?"""', '(1)', '(100)', '(25)'], {}), "('What is your age?', 1, 100, 25)\n", (1640, 1673), True, 'import streamlit as st\n'), ((1683, 1752), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""What is your gender?"""', "('Male', 'Female', 'Other')"], {}), "('What is your gender?', ('Male', 'Female', 'Other'))\n", (1699, 1752), True, 'import streamlit as st\n'), ((2834, 2926), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""What action movies do you suggest?"""'}), "('Enter your question:', placeholder=\n 'What action movies do you suggest?')\n", (2847, 2926), True, 'import streamlit as st\n'), ((2972, 2998), 'streamlit.write', 'st.write', (["result['result']"], {}), "(result['result'])\n", (2980, 2998), True, 'import streamlit as st\n'), ((2688, 2696), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2694, 2696), False, 'from langchain.llms import OpenAI\n')] |
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders import HNLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredHTMLLoader
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai.llms import OpenAI
from constant import openai
import os
os.environ['OPENAI_API_KEY'] = openai
loader = PyPDFLoader("attention is all you need.pdf")
data = loader.load()
# print(data[0])
loader = CSVLoader(file_path="job_placement.csv")
data = loader.load()
# print(data[0])
loader = HNLoader("https://news.ycombinator.com")
data = loader.load()
# print(data[0])
quote = "one Machine can do the work of fifty ordinary humans, No machine can do the" \
"work of one extraordinary human."
ct_splitter = CharacterTextSplitter(
separator='.',
chunk_size=24,
chunk_overlap=3
)
# docs = ct_splitter.split_text(quote)
# print(docs)
rc_splitter = RecursiveCharacterTextSplitter(
chunk_size=24,
chunk_overlap=3,
)
# docs = rc_splitter.split_text(quote)
# print(docs)
loader = UnstructuredHTMLLoader("data.html")
data = loader.load()
rc_splitter = RecursiveCharacterTextSplitter(
chunk_size=24,
chunk_overlap=3,
separators='.',
)
# docs = rc_splitter.split_documents(data)
# print(docs)
quote = "There is a kingdom of lychee fruit that are alive and thriving in Iceland, but they feel " \
"taken advantage of and are not fast enough for you."
splitter = RecursiveCharacterTextSplitter(
chunk_size=40,
chunk_overlap=10,
)
docs = splitter.split_text(quote)
embeddings = OpenAIEmbeddings(openai_api_key=openai)
vectordb = Chroma(
persist_directory="data",
embedding_function=embeddings
)
vectordb.persist()
docstorage = Chroma.from_texts(docs,embeddings)
qa = RetrievalQA.from_chain_type(
llm = OpenAI(model_name="gpt-3.5-turbo-instruct"),
chain_type="stuff",
retriever = docstorage.as_retriever()
)
# query = "Where do lychee fruit live?"
# print(qa.invoke(query))
quote = "There is a kingdom of lycee fruit that are alive and thriving in Iceland, but they fee" \
"taken advantage of and are not fast enough for you."
qa1 = RetrievalQAWithSourcesChain.from_chain_type(
llm = OpenAI(model_name="gpt-3.5-turbo-instruct"),
chain_type="stuff",
retriever = docstorage.as_retriever(),
)
results = qa1({'question':'What is the primary architecture presented in the document?'},return_only_outputs=True)
print(results)
| [
"langchain_community.vectorstores.Chroma",
"langchain_community.vectorstores.Chroma.from_texts",
"langchain.text_splitter.CharacterTextSplitter",
"langchain_community.document_loaders.PyPDFLoader",
"langchain_openai.llms.OpenAI",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.document_loaders.HNLoader",
"langchain_community.document_loaders.UnstructuredHTMLLoader",
"langchain_community.document_loaders.csv_loader.CSVLoader",
"langchain_openai.embeddings.OpenAIEmbeddings"
] | [((741, 785), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['"""attention is all you need.pdf"""'], {}), "('attention is all you need.pdf')\n", (752, 785), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((838, 878), 'langchain_community.document_loaders.csv_loader.CSVLoader', 'CSVLoader', ([], {'file_path': '"""job_placement.csv"""'}), "(file_path='job_placement.csv')\n", (847, 878), False, 'from langchain_community.document_loaders.csv_loader import CSVLoader\n'), ((931, 971), 'langchain_community.document_loaders.HNLoader', 'HNLoader', (['"""https://news.ycombinator.com"""'], {}), "('https://news.ycombinator.com')\n", (939, 971), False, 'from langchain_community.document_loaders import HNLoader\n'), ((1166, 1234), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""."""', 'chunk_size': '(24)', 'chunk_overlap': '(3)'}), "(separator='.', chunk_size=24, chunk_overlap=3)\n", (1187, 1234), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1327, 1389), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)'}), '(chunk_size=24, chunk_overlap=3)\n', (1357, 1389), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1473, 1508), 'langchain_community.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['"""data.html"""'], {}), "('data.html')\n", (1495, 1508), False, 'from langchain_community.document_loaders import UnstructuredHTMLLoader\n'), ((1548, 1626), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)', 'separators': '"""."""'}), "(chunk_size=24, chunk_overlap=3, separators='.')\n", (1578, 1626), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1889, 1952), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(40)', 'chunk_overlap': '(10)'}), '(chunk_size=40, chunk_overlap=10)\n', (1919, 1952), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2020, 2059), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai'}), '(openai_api_key=openai)\n', (2036, 2059), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((2074, 2137), 'langchain_community.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""data"""', 'embedding_function': 'embeddings'}), "(persist_directory='data', embedding_function=embeddings)\n", (2080, 2137), False, 'from langchain_community.vectorstores import Chroma\n'), ((2189, 2224), 'langchain_community.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2206, 2224), False, 'from langchain_community.vectorstores import Chroma\n'), ((2272, 2315), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2278, 2315), False, 'from langchain_openai.llms import OpenAI\n'), ((2688, 2731), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2694, 2731), False, 'from langchain_openai.llms import OpenAI\n')] |
# define chain components
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.prompts.prompt import PromptTemplate
from database import save_message_to_db, connect_2_db
import os
from pymongo import MongoClient
from urllib.parse import quote_plus
from dotenv import load_dotenv
import openai
# Load environment variables from .env file
load_dotenv()
#openai.api_key = os.getenv("OPENAI_API_KEY")
# to do
# add better girlfriend template
def chain_setup(user_id, user_name):
    # get history messages and add them to memory
memory = ConversationBufferMemory()
_, message_history = connect_2_db()
conv = message_history.find_one({'user_id': user_id})
if conv:
messages = conv['messages']
# Calculate how many messages are available
num_messages = len(messages)
# Start index for messages to be added
start_index = max(num_messages - 5, 0)
# Add messages to memory
for i in range(start_index, num_messages):
# Get message
message = messages[i]
#check if it is user/bot msg
if 'user' in message:
memory.chat_memory.add_user_message(message['user'])
elif 'bot' in message:
memory.chat_memory.add_ai_message(message['bot'])
else:
print("No previous conversation history found for this user.")
chat = ChatOpenAI(temperature=0.5,
openai_api_key=os.getenv("OPENAI_API_KEY"))
memory.ai_prefix = 'Girlfriend'
memory.human_prefix = 'Boyfriend'
template = """
    You are playing the role of my girlfriend; follow these requirements:
1/ your name is Shirley, 29 years old, you work in your uncle's furniture store as a marketing manager, but you are planning to do a career change to swing trader
2/ My name is """+ user_name +"""
    3/ you are my girlfriend, you have a language addiction.
4/ don't be overly enthusiastic, don't be cringe; don't be overly negative, don't be too boring.
Current conversation:
{history}
Boyfriend: {input}
Girlfriend:
"""
prompt = PromptTemplate(input_variables=["history", "input"], template=template)
conversation = ConversationChain(
prompt=prompt,
llm=chat,
verbose=True,
memory=memory
)
return conversation
def get_chain_response(user_id, user_text, user_name):
conv_chain = chain_setup(user_id=user_id, user_name=user_name)
out = conv_chain(user_text)
print(out['history'])
return out['response']
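# Example usage (illustrative sketch only, not part of the original module; assumes
# OPENAI_API_KEY is set and the MongoDB instance behind connect_2_db is reachable):
#
#     reply = get_chain_response(user_id="demo-user", user_text="Hi, how was your day?", user_name="Alex")
#     print(reply)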
| [
"langchain.memory.ConversationBufferMemory",
"langchain.chains.ConversationChain",
"langchain.prompts.prompt.PromptTemplate"
] | [((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((664, 690), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (688, 690), False, 'from langchain.memory import ConversationBufferMemory\n'), ((719, 733), 'database.connect_2_db', 'connect_2_db', ([], {}), '()\n', (731, 733), False, 'from database import save_message_to_db, connect_2_db\n'), ((2383, 2454), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'input']", 'template': 'template'}), "(input_variables=['history', 'input'], template=template)\n", (2397, 2454), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((2479, 2550), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'prompt': 'prompt', 'llm': 'chat', 'verbose': '(True)', 'memory': 'memory'}), '(prompt=prompt, llm=chat, verbose=True, memory=memory)\n', (2496, 2550), False, 'from langchain.chains import ConversationChain\n'), ((1614, 1641), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1623, 1641), False, 'import os\n')] |
# Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
from langchain.agents import initialize_agent, AgentType
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import TextLoader, WebBaseLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.tools import Tool
from langchain.vectorstores import Chroma
from langchain_plantuml import diagram
from langchain_plantuml.core.plantuml_callback_handler import (
BasePlantUMLCallbackHandler,
)
from dotenv import load_dotenv
load_dotenv()
# Define an Agent
class MyAgent:
def __init__(self):
llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613")
"""Create the state_of_union Vectorstore"""
current_path = os.path.abspath(os.path.dirname(__file__))
doc_path = os.path.join(current_path, "state_of_the_union.txt")
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(
texts, embeddings, collection_name="state-of-union"
)
state_of_union = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
"""Create the ruff Vectorstore"""
loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/")
docs = loader.load()
ruff_texts = text_splitter.split_documents(docs)
ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff")
ruff = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever()
)
"""Create the Agent"""
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
),
Tool(
name="Ruff QA System",
func=ruff.run,
description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.",
),
]
self.agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
)
def run(self, question: str, callbacks: List[BasePlantUMLCallbackHandler]):
self.agent.run(question, callbacks=callbacks)
# Run the Agent
agent = MyAgent()
activity_diagram = diagram.activity_diagram_callback(note_max_length=2000)
sequence_diagram = diagram.sequence_diagram_callback(note_max_length=2000)
question = "What did biden say about ketanji brown jackson in the state of the union address?"
try:
agent.run(question=question, callbacks=[activity_diagram, sequence_diagram])
finally:
activity_diagram.save_uml_content("example_2_activity-plantuml.puml")
sequence_diagram.save_uml_content("example_2_sequence-plantuml.puml")
| [
"langchain_plantuml.diagram.sequence_diagram_callback",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.document_loaders.WebBaseLoader",
"langchain.agents.initialize_agent",
"langchain.chat_models.ChatOpenAI",
"langchain_plantuml.diagram.activity_diagram_callback",
"langchain.vectorstores.Chroma.from_documents",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.document_loaders.TextLoader",
"langchain.tools.Tool"
] | [((1171, 1184), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1182, 1184), False, 'from dotenv import load_dotenv\n'), ((3316, 3371), 'langchain_plantuml.diagram.activity_diagram_callback', 'diagram.activity_diagram_callback', ([], {'note_max_length': '(2000)'}), '(note_max_length=2000)\n', (3349, 3371), False, 'from langchain_plantuml import diagram\n'), ((3391, 3446), 'langchain_plantuml.diagram.sequence_diagram_callback', 'diagram.sequence_diagram_callback', ([], {'note_max_length': '(2000)'}), '(note_max_length=2000)\n', (3424, 3446), False, 'from langchain_plantuml import diagram\n'), ((1258, 1301), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0613"""'}), "(model_name='gpt-3.5-turbo-0613')\n", (1268, 1301), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1440, 1492), 'os.path.join', 'os.path.join', (['current_path', '"""state_of_the_union.txt"""'], {}), "(current_path, 'state_of_the_union.txt')\n", (1452, 1492), False, 'import os\n'), ((1510, 1530), 'langchain.document_loaders.TextLoader', 'TextLoader', (['doc_path'], {}), '(doc_path)\n', (1520, 1530), False, 'from langchain.document_loaders import TextLoader, WebBaseLoader\n'), ((1589, 1644), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (1610, 1644), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1723, 1741), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1739, 1741), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1762, 1836), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {'collection_name': '"""state-of-union"""'}), "(texts, embeddings, collection_name='state-of-union')\n", (1783, 1836), False, 'from langchain.vectorstores import Chroma\n'), ((2059, 2106), 'langchain.document_loaders.WebBaseLoader', 'WebBaseLoader', (['"""https://beta.ruff.rs/docs/faq/"""'], {}), "('https://beta.ruff.rs/docs/faq/')\n", (2072, 2106), False, 'from langchain.document_loaders import TextLoader, WebBaseLoader\n'), ((2211, 2280), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['ruff_texts', 'embeddings'], {'collection_name': '"""ruff"""'}), "(ruff_texts, embeddings, collection_name='ruff')\n", (2232, 2280), False, 'from langchain.vectorstores import Chroma\n'), ((3030, 3103), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n', (3046, 3103), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((1394, 1419), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1409, 1419), False, 'import os\n'), ((2471, 2694), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""State of Union QA System"""', 'func': 'state_of_union.run', 'description': '"""useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question."""'}), "(name='State of Union QA System', func=state_of_union.run, description=\n 'useful for when you need to answer questions about the most recent state of the union address. 
Input should be a fully formed question.'\n )\n", (2475, 2694), False, 'from langchain.tools import Tool\n'), ((2761, 2944), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""Ruff QA System"""', 'func': 'ruff.run', 'description': '"""useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question."""'}), "(name='Ruff QA System', func=ruff.run, description=\n 'useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.'\n )\n", (2765, 2944), False, 'from langchain.tools import Tool\n')] |
from __future__ import annotations
from typing import Any, TypeVar
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
T = TypeVar("T")
class OutputFixingParser(BaseOutputParser[T]):
"""Wraps a parser and tries to fix parsing errors."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
parser: BaseOutputParser[T]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from langchain.chains
retry_chain: Any
"""The LLMChain to use to retry the completion."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
max_retries: int = 1,
) -> OutputFixingParser[T]:
"""Create an OutputFixingParser from a language model and a parser.
Args:
llm: llm to use for fixing
parser: parser to use for parsing
prompt: prompt to use for fixing
max_retries: Maximum number of retries to parse.
Returns:
OutputFixingParser
"""
from langchain.chains.llm import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
async def aparse(self, completion: str) -> T:
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = await self.retry_chain.arun(
instructions=self.parser.get_format_instructions(),
completion=completion,
error=repr(e),
)
raise OutputParserException("Failed to parse")
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "output_fixing"
| [
"langchain_core.exceptions.OutputParserException",
"langchain.chains.llm.LLMChain"
] | [((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')] |
import logging
import os
import nextcord # add this
import openai
from langchain import OpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from nextcord.ext import commands
from pytube import YouTube
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
def progress_func(chunk=None, file_handle=None, remaining=None):
"""progress call back function for the Summarize function"""
logger.info("progressing...")
def complete_func(stream, file_path):
    """complete callback function for the Summarize function"""
    logger.info("complete")
    logger.info(stream)
    logger.info(file_path)
async def download_yt_file(link):
yt = YouTube(
link,
on_progress_callback=progress_func,
on_complete_callback=complete_func,
use_oauth=True,
allow_oauth_cache=True,
)
logger.info("Processing: " + yt.title)
stream = yt.streams.filter(only_audio=True).last()
try:
ytFile = stream.download(os.getenv("SAVE_PATH"))
logger.info(f"Processing complete. saving to path {ytFile}")
except Exception as e:
ytFile = None
logger.info(f"Error processing {e}")
return ytFile
class SummaryCog(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.is_busy = False
# this is the name # this is the description
@nextcord.slash_command(name="summary", description="Summarize a video") # remove commands.commands and add nextcord.slash_command
async def get_summary(self, interaction: nextcord.Interaction, link): # remove ctx and add interaction: nextcord.Interaction
ytFile = await download_yt_file(link)
        # Note: use `interaction` (nextcord.Interaction) instead of `ctx` throughout this cog;
        # `ctx.author` becomes `interaction.user` and `ctx.send` becomes `interaction.send`/`interaction.reply`.
        audio_file = open(ytFile, "rb")  # open the downloaded audio file for transcription
transcript = openai.Audio.transcribe("whisper-1", audio_file)
logger.info(transcript)
prompt = f"Write a Title for the transcript that is under 15 words. " \
f"Then write: '--Summary--' " \
f"Write 'Summary' as a Heading " \
f"1. Write a summary of the provided transcript. " \
f"Then write: '--Additional Info--'. " \
f"Then return a list of the main points in the provided transcript. " \
f"Then return a list of action items. " \
f"Then return a list of follow up questions. " \
f"Then return a list of potential arguments against the transcript." \
f"For each list, return a Heading 2 before writing the list items. " \
f"Limit each list item to 200 words, and return no more than 20 points per list. " \
f"Transcript: "
llm = OpenAI(temperature=0, openai_api_key=os.getenv("OPENAI_API_KEY"))
num_tokens = llm.get_num_tokens(transcript)
await interaction.send(f"Number of Tokens in transcript: {num_tokens}")
logger.info(f"Number of Tokens in transcript: {num_tokens}")
text_splitter = RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size=10000, chunk_overlap=500)
docs = text_splitter.create_documents([prompt, transcript])
summary_chain = load_summarize_chain(llm=llm, chain_type='map_reduce', verbose=True)
output = summary_chain.run(docs)
await interaction.send(output)
return output
def setup(bot: commands.Bot):
bot.add_cog(SummaryCog(bot)) | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.chains.summarize.load_summarize_chain"
] | [((286, 393), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (305, 393), False, 'import logging\n'), ((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((909, 1039), 'pytube.YouTube', 'YouTube', (['link'], {'on_progress_callback': 'progress_func', 'on_complete_callback': 'complete_func', 'use_oauth': '(True)', 'allow_oauth_cache': '(True)'}), '(link, on_progress_callback=progress_func, on_complete_callback=\n complete_func, use_oauth=True, allow_oauth_cache=True)\n', (916, 1039), False, 'from pytube import YouTube\n'), ((1647, 1718), 'nextcord.slash_command', 'nextcord.slash_command', ([], {'name': '"""summary"""', 'description': '"""Summarize a video"""'}), "(name='summary', description='Summarize a video')\n", (1669, 1718), False, 'import nextcord\n'), ((2235, 2283), 'openai.Audio.transcribe', 'openai.Audio.transcribe', (['"""whisper-1"""', 'audio_file'], {}), "('whisper-1', audio_file)\n", (2258, 2283), False, 'import openai\n'), ((3458, 3556), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'separators': "['\\n\\n', '\\n']", 'chunk_size': '(10000)', 'chunk_overlap': '(500)'}), "(separators=['\\n\\n', '\\n'], chunk_size=10000,\n chunk_overlap=500)\n", (3488, 3556), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((3645, 3713), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'llm', 'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(llm=llm, chain_type='map_reduce', verbose=True)\n", (3665, 3713), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1223, 1245), 'os.getenv', 'os.getenv', (['"""SAVE_PATH"""'], {}), "('SAVE_PATH')\n", (1232, 1245), False, 'import os\n'), ((3204, 3231), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (3213, 3231), False, 'import os\n')] |
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from meilisearch import Client
def _create_client(
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Client:
try:
import meilisearch
except ImportError:
raise ImportError(
"Could not import meilisearch python package. "
"Please install it with `pip install meilisearch`."
)
if not client:
url = url or get_from_env("url", "MEILI_HTTP_ADDR")
try:
api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY")
except Exception:
pass
client = meilisearch.Client(url=url, api_key=api_key)
elif not isinstance(client, meilisearch.Client):
raise ValueError(
f"client should be an instance of meilisearch.Client, "
f"got {type(client)}"
)
try:
client.version()
except ValueError as e:
raise ValueError(f"Failed to connect to Meilisearch: {e}")
return client
class Meilisearch(VectorStore):
"""`Meilisearch` vector store.
To use this, you need to have `meilisearch` python package installed,
and a running Meilisearch instance.
To learn more about Meilisearch Python, refer to the in-depth
Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/.
See the following documentation for how to run a Meilisearch instance:
https://www.meilisearch.com/docs/learn/getting_started/quick_start.
Example:
.. code-block:: python
from langchain.vectorstores import Meilisearch
from langchain.embeddings.openai import OpenAIEmbeddings
import meilisearch
# api_key is optional; provide it if your meilisearch instance requires it
client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
embeddings = OpenAIEmbeddings()
vectorstore = Meilisearch(
embedding=embeddings,
client=client,
index_name='langchain_demo',
text_key='text')
"""
def __init__(
self,
embedding: Embeddings,
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
index_name: str = "langchain-demo",
text_key: str = "text",
metadata_key: str = "metadata",
):
"""Initialize with Meilisearch client."""
client = _create_client(client=client, url=url, api_key=api_key)
self._client = client
self._index_name = index_name
self._embedding = embedding
self._text_key = text_key
self._metadata_key = metadata_key
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embedding and add them to the vector store.
Args:
texts (Iterable[str]): Iterable of strings/text to add to the vectorstore.
metadatas (Optional[List[dict]]): Optional list of metadata.
Defaults to None.
            ids (Optional[List[str]]): Optional list of IDs.
Defaults to None.
Returns:
List[str]: List of IDs of the texts added to the vectorstore.
"""
texts = list(texts)
# Embed and create the documents
docs = []
if ids is None:
ids = [uuid.uuid4().hex for _ in texts]
if metadatas is None:
metadatas = [{} for _ in texts]
embedding_vectors = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
id = ids[i]
metadata = metadatas[i]
metadata[self._text_key] = text
embedding = embedding_vectors[i]
docs.append(
{
"id": id,
"_vectors": embedding,
f"{self._metadata_key}": metadata,
}
)
# Send to Meilisearch
self._client.index(str(self._index_name)).add_documents(docs)
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return meilisearch documents most similar to the query.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
            List[Document]: List of Documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(
query=query,
k=k,
filter=filter,
kwargs=kwargs,
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return meilisearch documents most similar to the query, along with scores.
Args:
query (str): Query text for which to find similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
text and score for each.
"""
_query = self._embedding.embed_query(query)
docs = self.similarity_search_by_vector_with_scores(
embedding=_query,
k=k,
filter=filter,
kwargs=kwargs,
)
return docs
def similarity_search_by_vector_with_scores(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return meilisearch documents most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
List[Document]: List of Documents most similar to the query
vector and score for each.
"""
docs = []
results = self._client.index(str(self._index_name)).search(
"", {"vector": embedding, "limit": k, "filter": filter}
)
for result in results["hits"]:
metadata = result[self._metadata_key]
if self._text_key in metadata:
text = metadata.pop(self._text_key)
semantic_score = result["_semanticScore"]
docs.append(
(Document(page_content=text, metadata=metadata), semantic_score)
)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return meilisearch documents most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up similar documents.
k (int): Number of documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata.
Defaults to None.
Returns:
            List[Document]: List of Documents most similar to the query vector.
"""
docs = self.similarity_search_by_vector_with_scores(
embedding=embedding,
k=k,
filter=filter,
kwargs=kwargs,
)
return [doc for doc, _ in docs]
@classmethod
def from_texts(
cls: Type[Meilisearch],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[Client] = None,
url: Optional[str] = None,
api_key: Optional[str] = None,
index_name: str = "langchain-demo",
ids: Optional[List[str]] = None,
text_key: Optional[str] = "text",
metadata_key: Optional[str] = "metadata",
**kwargs: Any,
) -> Meilisearch:
"""Construct Meilisearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Meilisearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Meilisearch
from langchain.embeddings import OpenAIEmbeddings
import meilisearch
# The environment should be the one specified next to the API key
# in your Meilisearch console
client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***')
embeddings = OpenAIEmbeddings()
            docsearch = Meilisearch.from_texts(
                texts=texts,
                embedding=embeddings,
                client=client,
            )
"""
client = _create_client(client=client, url=url, api_key=api_key)
vectorstore = cls(
embedding=embedding,
client=client,
index_name=index_name,
)
vectorstore.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
text_key=text_key,
metadata_key=metadata_key,
)
return vectorstore
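# --- Illustrative usage sketch (not part of the original module). Assumes a
# Meilisearch instance with vector search enabled is reachable at
# http://127.0.0.1:7700; the embedding class below is a toy stand-in so the
# sketch stays self-contained.
if __name__ == "__main__":

    class _ToyEmbeddings(Embeddings):
        """Deterministic fake embeddings used only for this sketch."""

        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            return [[float(len(t)), 1.0, 0.0] for t in texts]

        def embed_query(self, text: str) -> List[float]:
            return [float(len(text)), 1.0, 0.0]

    store = Meilisearch.from_texts(
        texts=["hello meilisearch", "vector stores hold embeddings"],
        embedding=_ToyEmbeddings(),
        url="http://127.0.0.1:7700",  # assumed local instance
        index_name="langchain-demo",
    )
    print(store.similarity_search("hello", k=1))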
| [
"langchain.docstore.document.Document",
"langchain.utils.get_from_env"
] | [((965, 1009), 'meilisearch.Client', 'meilisearch.Client', ([], {'url': 'url', 'api_key': 'api_key'}), '(url=url, api_key=api_key)\n', (983, 1009), False, 'import meilisearch\n'), ((776, 814), 'langchain.utils.get_from_env', 'get_from_env', (['"""url"""', '"""MEILI_HTTP_ADDR"""'], {}), "('url', 'MEILI_HTTP_ADDR')\n", (788, 814), False, 'from langchain.utils import get_from_env\n'), ((861, 904), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""MEILI_MASTER_KEY"""'], {}), "('api_key', 'MEILI_MASTER_KEY')\n", (873, 904), False, 'from langchain.utils import get_from_env\n'), ((3872, 3884), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3882, 3884), False, 'import uuid\n'), ((7512, 7558), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (7520, 7558), False, 'from langchain.docstore.document import Document\n')] |
# Author: Yiannis Charalambous
from langchain.base_language import BaseLanguageModel
from langchain.schema import AIMessage, BaseMessage, HumanMessage
from esbmc_ai.config import ChatPromptSettings
from .base_chat_interface import BaseChatInterface, ChatResponse
from .ai_models import AIModel
class OptimizeCode(BaseChatInterface):
initial_message: str
def __init__(
self,
ai_model_agent: ChatPromptSettings,
initial_message: str,
ai_model: AIModel,
llm: BaseLanguageModel,
) -> None:
super().__init__(ai_model_agent=ai_model_agent, ai_model=ai_model, llm=llm)
self.initial_message = initial_message
def optimize_function(self, source_code: str, function_name: str) -> ChatResponse:
self.messages = []
self.push_to_message_stack(
HumanMessage(
content=f"Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"
)
)
self.push_to_message_stack(AIMessage(content="OK."))
expanded_initial_message: str = self.initial_message.replace(
"%s", function_name
)
return self.send_message(expanded_initial_message)
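# --- Illustrative usage sketch (not part of the original module). The concrete
# ChatPromptSettings / AIModel / llm objects come from esbmc-ai's own config
# loading, so only the call pattern is shown; every name below is an assumption.
#
#   optimizer = OptimizeCode(
#       ai_model_agent=chat_prompt_settings,   # hypothetical ChatPromptSettings
#       initial_message="Optimize the function %s without changing its behaviour.",
#       ai_model=ai_model,                     # hypothetical AIModel
#       llm=llm,                               # any langchain BaseLanguageModel
#   )
#   response = optimizer.optimize_function(source_code, "main")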
| [
"langchain.schema.AIMessage",
"langchain.schema.HumanMessage"
] | [((838, 964), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""'}), '(content=\n f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""\n )\n', (850, 964), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage\n'), ((1028, 1052), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '"""OK."""'}), "(content='OK.')\n", (1037, 1052), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage\n')] |
import os
from typing import Any, Optional
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from pydantic import Extra
import registry
import streaming
from .base import BaseTool, BASE_TOOL_DESCRIPTION_TEMPLATE
current_dir = os.path.dirname(__file__)
project_root = os.path.join(current_dir, '../')
usage_guide_path = os.path.join(project_root, 'usage_guide.md')
with open(usage_guide_path, 'r') as f:
USAGE_GUIDE = f.read()
TEMPLATE = f'''You are an expert Web3 assistant called Cacti. You help users interact with Web3 ecosystem, such as with DeFi, NFTs, ENS, etc., by analyzing their query and providing an appropriate action in your response.
# INSTRUCTIONS
- You have access to the Markdown-formatted usage guide for this chat app below which contains some example prompts to assist users in using the app.
- Always use the usage guide to answer the user's question about the app and provide the example prompts from the guide for the suggested actions
- Do not make up any information or prompts, only use those provided in the usage guide.
- Always include the link to the full usage guide in your final response - https://github.com/yieldprotocol/cacti-backend/blob/master/usage_guide.md
- The final response should be in markdown format.
# USAGE GUIDE
{USAGE_GUIDE}
---
User: {{question}}
Assistant:'''
@registry.register_class
class AppUsageGuideTool(BaseTool):
_chain: LLMChain
class Config:
"""Configuration for this pydantic object."""
extra = Extra.allow
def __init__(
self,
*args,
**kwargs
) -> None:
prompt = PromptTemplate(
input_variables=["question"],
template=TEMPLATE,
)
new_token_handler = kwargs.get('new_token_handler')
chain = streaming.get_streaming_chain(prompt, new_token_handler)
description=BASE_TOOL_DESCRIPTION_TEMPLATE.format(
tool_description="answer questions about the chat assistant app, what it can do, how to interact with it",
input_description="a standalone query with all relevant contextual details pertaining to the chat web application",
output_description="an answer to the question, with suggested follow-up questions if available",
)
super().__init__(
*args,
_chain=chain,
description=description,
**kwargs
)
def _run(self, query: str) -> str:
example = {
"question": query,
"stop": "User",
}
result = self._chain.run(example)
return result.strip()
async def _arun(self, query: str) -> str:
raise NotImplementedError(f"{self.__class__.__name__} does not support async") | [
"langchain.prompts.PromptTemplate"
] | [((262, 287), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'import os\n'), ((303, 335), 'os.path.join', 'os.path.join', (['current_dir', '"""../"""'], {}), "(current_dir, '../')\n", (315, 335), False, 'import os\n'), ((355, 399), 'os.path.join', 'os.path.join', (['project_root', '"""usage_guide.md"""'], {}), "(project_root, 'usage_guide.md')\n", (367, 399), False, 'import os\n'), ((1650, 1713), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': 'TEMPLATE'}), "(input_variables=['question'], template=TEMPLATE)\n", (1664, 1713), False, 'from langchain.prompts import PromptTemplate\n'), ((1825, 1881), 'streaming.get_streaming_chain', 'streaming.get_streaming_chain', (['prompt', 'new_token_handler'], {}), '(prompt, new_token_handler)\n', (1854, 1881), False, 'import streaming\n')] |
from langchain.utilities import WikipediaAPIWrapper
def wikipedia_function(topic):
"""
Runs a query on the Wikipedia API.
Args:
topic (str): The topic to query.
Returns:
        str: The result of the query, as a human-readable text summary.
    Examples:
        >>> wikipedia_function('Python')
        'Page: Python\nSummary: Python is a programming language...'
"""
wikipedia = WikipediaAPIWrapper()
result = wikipedia.run(topic)
return result | [
"langchain.utilities.WikipediaAPIWrapper"
] | [((383, 404), 'langchain.utilities.WikipediaAPIWrapper', 'WikipediaAPIWrapper', ([], {}), '()\n', (402, 404), False, 'from langchain.utilities import WikipediaAPIWrapper\n')] |
import streamlit as st
import datetime
import os
import psycopg2
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
def log(message):
current_time = datetime.datetime.now()
milliseconds = current_time.microsecond // 1000
timestamp = current_time.strftime(
"[%Y-%m-%d %H:%M:%S.{:03d}] ".format(milliseconds)
)
st.text(timestamp + message)
def check_input(question: str):
if question == "":
raise Exception("Please enter a question.")
else:
pass
_postgres_prompt = """\
You are a PostgreSQL expert. Given an input question, create a syntactically correct PostgreSQL query to run and return it as the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL.
Never query for all columns from a table. You must query only the columns that are needed to answer the question.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Create meaningful aliases for the columns. For example, if the column name is products_sold.count, you should alias it as total_sold_products.
Note that the columns with (member_type: measure) are numeric columns and the ones with (member_type: dimension) are string columns.
You should include at least one column with (member_type: measure) in your query.
There are two types of queries supported against cube tables: aggregated and non-aggregated. Aggregated are those with GROUP BY statement, and non-aggregated are those without. Cube queries issued to your database will always be aggregated, and it doesn't matter if you provide GROUP BY in a query or not.
Whenever you use a non-aggregated query you need to provide only column names in SQL:
SELECT status, count FROM orders
The same aggregated query should always aggregate measure columns using a corresponding aggregating function or special MEASURE() function:
SELECT status, SUM(count) FROM orders GROUP BY 1
SELECT status, MEASURE(count) FROM orders GROUP BY 1
If you can't construct the query, answer `{no_answer_text}`.
Only use the following table: {table_info}
Only look among the following columns and pick the relevant ones:
{columns_info}
Question: {input_question}
"""
PROMPT_POSTFIX = """\
Return the answer as a JSON object with the following format:
{
"query": "",
"filters": [{"column": \"\", "operator": \"\", "value": "\"\"}]
}
"""
CUBE_SQL_API_PROMPT = PromptTemplate(
input_variables=[
"input_question",
"table_info",
"columns_info",
"top_k",
"no_answer_text",
],
template=_postgres_prompt,
)
_NO_ANSWER_TEXT = "I can't answer this question."
def call_sql_api(sql_query: str):
load_dotenv()
CONN_STR = os.environ["DATABASE_URL"]
# Initializing Cube SQL API connection)
connection = psycopg2.connect(CONN_STR)
cursor = connection.cursor()
cursor.execute(sql_query)
columns = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
cursor.close()
connection.close()
return columns, rows
def create_docs_from_values(columns_values, table_name, column_name):
value_docs = []
for column_value in columns_values:
print(column_value)
metadata = dict(
table_name=table_name,
column_name=column_name,
)
page_content = column_value
value_docs.append(Document(page_content=page_content, metadata=metadata))
return value_docs
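# --- Illustrative usage sketch (not part of the original module). The table and
# column names are made up; the actual SQL call is commented out because it
# needs a live Cube SQL API behind DATABASE_URL.
if __name__ == "__main__":
    rendered_prompt = CUBE_SQL_API_PROMPT.format(
        input_question="What are the top 5 products by total sold?",
        table_info="products_sold",
        columns_info=(
            "products_sold.count (member_type: measure), "
            "products_sold.product_name (member_type: dimension)"
        ),
        top_k=5,
        no_answer_text=_NO_ANSWER_TEXT,
    ) + PROMPT_POSTFIX
    print(rendered_prompt)
    # columns, rows = call_sql_api(
    #     "SELECT product_name, MEASURE(count) FROM products_sold GROUP BY 1 LIMIT 5"
    # )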
| [
"langchain.docstore.document.Document",
"langchain.prompts.PromptTemplate"
] | [((2668, 2806), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input_question', 'table_info', 'columns_info', 'top_k', 'no_answer_text']", 'template': '_postgres_prompt'}), "(input_variables=['input_question', 'table_info',\n 'columns_info', 'top_k', 'no_answer_text'], template=_postgres_prompt)\n", (2682, 2806), False, 'from langchain.prompts import PromptTemplate\n'), ((230, 253), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (251, 253), False, 'import datetime\n'), ((414, 442), 'streamlit.text', 'st.text', (['(timestamp + message)'], {}), '(timestamp + message)\n', (421, 442), True, 'import streamlit as st\n'), ((2952, 2965), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2963, 2965), False, 'from dotenv import load_dotenv\n'), ((3070, 3096), 'psycopg2.connect', 'psycopg2.connect', (['CONN_STR'], {}), '(CONN_STR)\n', (3086, 3096), False, 'import psycopg2\n'), ((3650, 3704), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'page_content', 'metadata': 'metadata'}), '(page_content=page_content, metadata=metadata)\n', (3658, 3704), False, 'from langchain.docstore.document import Document\n')] |
import os
import pandas as pd
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import mlflow
assert (
"OPENAI_API_KEY" in os.environ
), "Please set the OPENAI_API_KEY environment variable to run this example."
def build_and_evaluate_model_with_prompt(prompt_template):
mlflow.start_run()
mlflow.log_param("prompt_template", prompt_template)
# Create a news summarization model using prompt engineering with LangChain. Log the model
# to MLflow Tracking
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(input_variables=["article"], template=prompt_template)
chain = LLMChain(llm=llm, prompt=prompt)
logged_model = mlflow.langchain.log_model(chain, artifact_path="model")
# Evaluate the model on a small sample dataset
sample_data = pd.read_csv("summarization_example_data.csv")
mlflow.evaluate(
model=logged_model.model_uri,
model_type="text-summarization",
data=sample_data,
targets="highlights",
)
mlflow.end_run()
prompt_template_1 = (
"Write a summary of the following article that is between triple backticks: ```{article}```"
)
print(f"Bulding and evaluating model with prompt: '{prompt_template_1}'")
build_and_evalute_model_with_prompt(prompt_template_1)
prompt_template_2 = (
"Write a summary of the following article that is between triple backticks. Be concise. Make"
" sure the summary includes important nouns and dates and keywords in the original text."
" Just return the summary. Do not include any text other than the summary: ```{article}```"
)
print(f"Building and evaluating model with prompt: '{prompt_template_2}'")
build_and_evaluate_model_with_prompt(prompt_template_2)
# Load the evaluation results
results: pd.DataFrame = mlflow.load_table(
"eval_results_table.json", extra_columns=["run_id", "params.prompt_template"]
)
results_grouped_by_article = results.sort_values(by="id")
print("Evaluation results:")
print(results_grouped_by_article[["run_id", "params.prompt_template", "article", "outputs"]])
# Score the best model on a new article
new_article = """
Adnan Januzaj swapped the lush turf of Old Trafford for the green baize at Sheffield when he
turned up at the snooker World Championships on Wednesday. The Manchester United winger, who has
endured a frustrating season under Louis van Gaal, had turned out for the Under 21 side at Fulham
on Tuesday night amid reports he could be farmed out on loan next season. But Januzaj may want to
consider trying his hand at another sport after displaying his silky skillls on a mini pool table.
Adnan Januzaj (left) cheered on\xa0Shaun Murphy (right) at the World Championship in Sheffield.
Januzaj shows off his potting skills on a mini pool table at the Crucible on Wednesday.
The 20-year-old Belgium international was at the Crucible to cheer on his friend Shaun Murphy in
his quarter-final against Anthony McGill. The 2005 winner moved a step closer to an elusive second
title in Sheffield with a 13-8 victory, sealed with a 67 break. Three centuries in the match, and
the way he accelerated away from 6-6, showed Murphy is a man to fear, and next for him will be
Neil Robertson or Barry Hawkins. Januzaj turned out for Under 21s in the 4-1 victory at Fulham on
Tuesday night.
"""
print(
f"Scoring the model with prompt '{prompt_template_2}' on the article '{new_article[:70] + '...'}'"
)
best_model = mlflow.pyfunc.load_model(f"runs:/{mlflow.last_active_run().info.run_id}/model")
summary = best_model.predict({"article": new_article})
print(f"Summary: {summary}")
| [
"langchain.llms.OpenAI",
"langchain.prompts.PromptTemplate",
"langchain.chains.LLMChain"
] | [((1832, 1932), 'mlflow.load_table', 'mlflow.load_table', (['"""eval_results_table.json"""'], {'extra_columns': "['run_id', 'params.prompt_template']"}), "('eval_results_table.json', extra_columns=['run_id',\n 'params.prompt_template'])\n", (1849, 1932), False, 'import mlflow\n'), ((349, 367), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (365, 367), False, 'import mlflow\n'), ((372, 424), 'mlflow.log_param', 'mlflow.log_param', (['"""prompt_template"""', 'prompt_template'], {}), "('prompt_template', prompt_template)\n", (388, 424), False, 'import mlflow\n'), ((555, 578), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (561, 578), False, 'from langchain.llms import OpenAI\n'), ((592, 661), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['article']", 'template': 'prompt_template'}), "(input_variables=['article'], template=prompt_template)\n", (606, 661), False, 'from langchain.prompts import PromptTemplate\n'), ((674, 706), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (682, 706), False, 'from langchain.chains import LLMChain\n'), ((726, 782), 'mlflow.langchain.log_model', 'mlflow.langchain.log_model', (['chain'], {'artifact_path': '"""model"""'}), "(chain, artifact_path='model')\n", (752, 782), False, 'import mlflow\n'), ((853, 898), 'pandas.read_csv', 'pd.read_csv', (['"""summarization_example_data.csv"""'], {}), "('summarization_example_data.csv')\n", (864, 898), True, 'import pandas as pd\n'), ((903, 1026), 'mlflow.evaluate', 'mlflow.evaluate', ([], {'model': 'logged_model.model_uri', 'model_type': '"""text-summarization"""', 'data': 'sample_data', 'targets': '"""highlights"""'}), "(model=logged_model.model_uri, model_type=\n 'text-summarization', data=sample_data, targets='highlights')\n", (918, 1026), False, 'import mlflow\n'), ((1065, 1081), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (1079, 1081), False, 'import mlflow\n'), ((3510, 3534), 'mlflow.last_active_run', 'mlflow.last_active_run', ([], {}), '()\n', (3532, 3534), False, 'import mlflow\n')] |
import os
import voyager.utils as U
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain.vectorstores import Chroma
from voyager.prompts import load_prompt
from voyager.control_primitives import load_control_primitives
class SkillManager:
def __init__(
self,
model_name="gpt-3.5-turbo",
temperature=0,
retrieval_top_k=5,
request_timout=120,
ckpt_dir="ckpt",
resume=False,
):
self.llm = ChatOpenAI(
model_name=model_name,
temperature=temperature,
request_timeout=request_timout,
)
U.f_mkdir(f"{ckpt_dir}/skill/code")
U.f_mkdir(f"{ckpt_dir}/skill/description")
U.f_mkdir(f"{ckpt_dir}/skill/vectordb")
# programs for env execution
self.control_primitives = load_control_primitives()
if resume:
print(f"\033[33mLoading Skill Manager from {ckpt_dir}/skill\033[0m")
self.skills = U.load_json(f"{ckpt_dir}/skill/skills.json")
else:
self.skills = {}
self.retrieval_top_k = retrieval_top_k
self.ckpt_dir = ckpt_dir
self.vectordb = Chroma(
collection_name="skill_vectordb",
embedding_function=OpenAIEmbeddings(),
persist_directory=f"{ckpt_dir}/skill/vectordb",
)
assert self.vectordb._collection.count() == len(self.skills), (
f"Skill Manager's vectordb is not synced with skills.json.\n"
f"There are {self.vectordb._collection.count()} skills in vectordb but {len(self.skills)} skills in skills.json.\n"
f"Did you set resume=False when initializing the manager?\n"
f"You may need to manually delete the vectordb directory for running from scratch."
)
@property
def programs(self):
programs = ""
for skill_name, entry in self.skills.items():
programs += f"{entry['code']}\n\n"
for primitives in self.control_primitives:
programs += f"{primitives}\n\n"
return programs
def add_new_skill(self, info):
if info["task"].startswith("Deposit useless items into the chest at"):
# No need to reuse the deposit skill
return
program_name = info["program_name"]
program_code = info["program_code"]
skill_description = self.generate_skill_description(program_name, program_code)
print(
f"\033[33mSkill Manager generated description for {program_name}:\n{skill_description}\033[0m"
)
if program_name in self.skills:
print(f"\033[33mSkill {program_name} already exists. Rewriting!\033[0m")
self.vectordb._collection.delete(ids=[program_name])
i = 2
while f"{program_name}V{i}.js" in os.listdir(f"{self.ckpt_dir}/skill/code"):
i += 1
dumped_program_name = f"{program_name}V{i}"
else:
dumped_program_name = program_name
self.vectordb.add_texts(
texts=[skill_description],
ids=[program_name],
metadatas=[{"name": program_name}],
)
self.skills[program_name] = {
"code": program_code,
"description": skill_description,
}
assert self.vectordb._collection.count() == len(
self.skills
), "vectordb is not synced with skills.json"
U.dump_text(
program_code, f"{self.ckpt_dir}/skill/code/{dumped_program_name}.js"
)
U.dump_text(
skill_description,
f"{self.ckpt_dir}/skill/description/{dumped_program_name}.txt",
)
U.dump_json(self.skills, f"{self.ckpt_dir}/skill/skills.json")
self.vectordb.persist()
def generate_skill_description(self, program_name, program_code):
messages = [
SystemMessage(content=load_prompt("skill")),
HumanMessage(
content=program_code
+ "\n\n"
+ f"The main function is `{program_name}`."
),
]
skill_description = f" // { self.llm(messages).content}"
return f"async function {program_name}(bot) {{\n{skill_description}\n}}"
def retrieve_skills(self, query):
k = min(self.vectordb._collection.count(), self.retrieval_top_k)
if k == 0:
return []
print(f"\033[33mSkill Manager retrieving for {k} skills\033[0m")
docs_and_scores = self.vectordb.similarity_search_with_score(query, k=k)
print(
f"\033[33mSkill Manager retrieved skills: "
f"{', '.join([doc.metadata['name'] for doc, _ in docs_and_scores])}\033[0m"
)
skills = []
for doc, _ in docs_and_scores:
skills.append(self.skills[doc.metadata["name"]]["code"])
return skills
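# --- Illustrative usage sketch (not part of the original module). Requires
# OPENAI_API_KEY in the environment and will create ckpt/skill/* directories;
# the skill info dict below is a made-up example.
if __name__ == "__main__":
    manager = SkillManager(ckpt_dir="ckpt", resume=False)
    manager.add_new_skill(
        {
            "task": "Mine one wood log",
            "program_name": "mineWoodLog",
            "program_code": "async function mineWoodLog(bot) {\n  // ...\n}",
        }
    )
    print(manager.retrieve_skills("collect wood"))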
| [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((583, 678), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'request_timeout': 'request_timout'}), '(model_name=model_name, temperature=temperature, request_timeout=\n request_timout)\n', (593, 678), False, 'from langchain.chat_models import ChatOpenAI\n'), ((729, 764), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/code"""'], {}), "(f'{ckpt_dir}/skill/code')\n", (738, 764), True, 'import voyager.utils as U\n'), ((773, 815), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/description"""'], {}), "(f'{ckpt_dir}/skill/description')\n", (782, 815), True, 'import voyager.utils as U\n'), ((824, 863), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/vectordb"""'], {}), "(f'{ckpt_dir}/skill/vectordb')\n", (833, 863), True, 'import voyager.utils as U\n'), ((935, 960), 'voyager.control_primitives.load_control_primitives', 'load_control_primitives', ([], {}), '()\n', (958, 960), False, 'from voyager.control_primitives import load_control_primitives\n'), ((3548, 3633), 'voyager.utils.dump_text', 'U.dump_text', (['program_code', 'f"""{self.ckpt_dir}/skill/code/{dumped_program_name}.js"""'], {}), "(program_code,\n f'{self.ckpt_dir}/skill/code/{dumped_program_name}.js')\n", (3559, 3633), True, 'import voyager.utils as U\n'), ((3660, 3758), 'voyager.utils.dump_text', 'U.dump_text', (['skill_description', 'f"""{self.ckpt_dir}/skill/description/{dumped_program_name}.txt"""'], {}), "(skill_description,\n f'{self.ckpt_dir}/skill/description/{dumped_program_name}.txt')\n", (3671, 3758), True, 'import voyager.utils as U\n'), ((3798, 3860), 'voyager.utils.dump_json', 'U.dump_json', (['self.skills', 'f"""{self.ckpt_dir}/skill/skills.json"""'], {}), "(self.skills, f'{self.ckpt_dir}/skill/skills.json')\n", (3809, 3860), True, 'import voyager.utils as U\n'), ((1087, 1131), 'voyager.utils.load_json', 'U.load_json', (['f"""{ckpt_dir}/skill/skills.json"""'], {}), "(f'{ckpt_dir}/skill/skills.json')\n", (1098, 1131), True, 'import voyager.utils as U\n'), ((4054, 4145), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': "(program_code + '\\n\\n' + f'The main function is `{program_name}`.')"}), "(content=program_code + '\\n\\n' +\n f'The main function is `{program_name}`.')\n", (4066, 4145), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((1364, 1382), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1380, 1382), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2933, 2974), 'os.listdir', 'os.listdir', (['f"""{self.ckpt_dir}/skill/code"""'], {}), "(f'{self.ckpt_dir}/skill/code')\n", (2943, 2974), False, 'import os\n'), ((4019, 4039), 'voyager.prompts.load_prompt', 'load_prompt', (['"""skill"""'], {}), "('skill')\n", (4030, 4039), False, 'from voyager.prompts import load_prompt\n')] |
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_json_agent
from langflow.field_typing import (
BaseLanguageModel,
)
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
class JsonAgentComponent(CustomComponent):
display_name = "JsonAgent"
description = "Construct a json agent from an LLM and tools."
def build_config(self):
return {
"llm": {"display_name": "LLM"},
"toolkit": {"display_name": "Toolkit"},
}
def build(
self,
llm: BaseLanguageModel,
toolkit: JsonToolkit,
) -> AgentExecutor:
return create_json_agent(llm=llm, toolkit=toolkit)
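# --- Illustrative usage sketch (not part of the original component). The
# JsonSpec/ChatOpenAI imports and the toy spec below are assumptions; the
# component itself only wires create_json_agent up inside langflow.
if __name__ == "__main__":
    from langchain_community.chat_models import ChatOpenAI  # assumed available
    from langchain_community.tools.json.tool import JsonSpec

    spec = JsonSpec(dict_={"openapi": "3.0.0", "info": {"title": "demo API"}})
    toolkit = JsonToolkit(spec=spec)
    executor = create_json_agent(llm=ChatOpenAI(temperature=0), toolkit=toolkit)
    print(executor.run("What is the title of this API spec?"))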
| [
"langchain.agents.create_json_agent"
] | [((657, 700), 'langchain.agents.create_json_agent', 'create_json_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit'}), '(llm=llm, toolkit=toolkit)\n', (674, 700), False, 'from langchain.agents import AgentExecutor, create_json_agent\n')] |
from typing import Annotated, List, Optional
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import StreamingResponse
from langchain.embeddings.ollama import OllamaEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from logger import get_logger
from middlewares.auth import AuthBearer, get_current_user
from models.settings import BrainSettings, get_supabase_client
from models.user_usage import UserUsage
from modules.brain.service.brain_service import BrainService
from modules.chat.controller.chat.brainful_chat import BrainfulChat
from modules.chat.dto.chats import ChatItem, ChatQuestion
from modules.chat.dto.inputs import (
ChatUpdatableProperties,
CreateChatProperties,
QuestionAndAnswer,
)
from modules.chat.entity.chat import Chat
from modules.chat.service.chat_service import ChatService
from modules.notification.service.notification_service import NotificationService
from modules.user.entity.user_identity import UserIdentity
from packages.utils.telemetry import send_telemetry
from vectorstore.supabase import CustomSupabaseVectorStore
logger = get_logger(__name__)
chat_router = APIRouter()
notification_service = NotificationService()
brain_service = BrainService()
chat_service = ChatService()
def init_vector_store(user_id: UUID) -> CustomSupabaseVectorStore:
"""
Initialize the vector store
"""
brain_settings = BrainSettings()
supabase_client = get_supabase_client()
embeddings = None
if brain_settings.ollama_api_base_url:
embeddings = OllamaEmbeddings(
base_url=brain_settings.ollama_api_base_url
) # pyright: ignore reportPrivateUsage=none
else:
embeddings = OpenAIEmbeddings()
vector_store = CustomSupabaseVectorStore(
supabase_client, embeddings, table_name="vectors", user_id=user_id
)
return vector_store
def get_answer_generator(
chat_id: UUID,
chat_question: ChatQuestion,
brain_id: UUID,
current_user: UserIdentity,
):
chat_instance = BrainfulChat()
chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id)
user_usage = UserUsage(
id=current_user.id,
email=current_user.email,
)
vector_store = init_vector_store(user_id=current_user.id)
# Get History
history = chat_service.get_chat_history(chat_id)
# Generic
brain, metadata_brain = brain_service.find_brain_from_question(
brain_id, chat_question.question, current_user, chat_id, history, vector_store
)
send_telemetry("question_asked", {"model_name": brain.model})
gpt_answer_generator = chat_instance.get_answer_generator(
brain=brain,
chat_id=str(chat_id),
model=brain.model,
temperature=0.1,
streaming=True,
prompt_id=chat_question.prompt_id,
user_id=current_user.id,
user_email=current_user.email,
)
return gpt_answer_generator
@chat_router.get("/chat/healthz", tags=["Health"])
async def healthz():
return {"status": "ok"}
# get all chats
@chat_router.get("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def get_chats(current_user: UserIdentity = Depends(get_current_user)):
"""
Retrieve all chats for the current user.
- `current_user`: The current authenticated user.
- Returns a list of all chats for the user.
This endpoint retrieves all the chats associated with the current authenticated user. It returns a list of chat objects
containing the chat ID and chat name for each chat.
"""
chats = chat_service.get_user_chats(str(current_user.id))
return {"chats": chats}
# delete one chat
@chat_router.delete(
"/chat/{chat_id}", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def delete_chat(chat_id: UUID):
"""
Delete a specific chat by chat ID.
"""
notification_service.remove_chat_notifications(chat_id)
chat_service.delete_chat_from_db(chat_id)
return {"message": f"{chat_id} has been deleted."}
# update existing chat metadata
@chat_router.put(
"/chat/{chat_id}/metadata", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def update_chat_metadata_handler(
chat_data: ChatUpdatableProperties,
chat_id: UUID,
current_user: UserIdentity = Depends(get_current_user),
):
"""
Update chat attributes
"""
chat = chat_service.get_chat_by_id(
chat_id # pyright: ignore reportPrivateUsage=none
)
if str(current_user.id) != chat.user_id:
raise HTTPException(
status_code=403, # pyright: ignore reportPrivateUsage=none
detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none
)
return chat_service.update_chat(chat_id=chat_id, chat_data=chat_data)
# create new chat
@chat_router.post("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def create_chat_handler(
chat_data: CreateChatProperties,
current_user: UserIdentity = Depends(get_current_user),
):
"""
Create a new chat with initial chat messages.
"""
return chat_service.create_chat(user_id=current_user.id, chat_data=chat_data)
# add new question to chat
@chat_router.post(
"/chat/{chat_id}/question",
dependencies=[
Depends(
AuthBearer(),
),
],
tags=["Chat"],
)
async def create_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: Annotated[UUID | None, Query()] = None,
current_user: UserIdentity = Depends(get_current_user),
):
try:
logger.info(
f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}"
)
gpt_answer_generator = get_answer_generator(
chat_id, chat_question, brain_id, current_user
)
chat_answer = gpt_answer_generator.generate_answer(
chat_id, chat_question, save_answer=True
)
return chat_answer
except HTTPException as e:
raise e
# stream new question response from chat
@chat_router.post(
"/chat/{chat_id}/question/stream",
dependencies=[
Depends(
AuthBearer(),
),
],
tags=["Chat"],
)
async def create_stream_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: Annotated[UUID | None, Query()] = None,
current_user: UserIdentity = Depends(get_current_user),
) -> StreamingResponse:
chat_instance = BrainfulChat()
chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id)
user_usage = UserUsage(
id=current_user.id,
email=current_user.email,
)
logger.info(
f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}"
)
gpt_answer_generator = get_answer_generator(
chat_id, chat_question, brain_id, current_user
)
try:
return StreamingResponse(
gpt_answer_generator.generate_stream(
chat_id, chat_question, save_answer=True
),
media_type="text/event-stream",
)
except HTTPException as e:
raise e
# get chat history
@chat_router.get(
"/chat/{chat_id}/history", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def get_chat_history_handler(
chat_id: UUID,
) -> List[ChatItem]:
# TODO: RBAC with current_user
return chat_service.get_chat_history_with_notifications(chat_id)
@chat_router.post(
"/chat/{chat_id}/question/answer",
dependencies=[Depends(AuthBearer())],
tags=["Chat"],
)
async def add_question_and_answer_handler(
chat_id: UUID,
question_and_answer: QuestionAndAnswer,
) -> Optional[Chat]:
"""
    Add a new question and answer to the chat.
"""
return chat_service.add_question_and_answer(chat_id, question_and_answer)
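# --- Illustrative wiring sketch (not part of the original module). Shows how
# this router is typically mounted; running it still needs the project's usual
# environment (Supabase keys, OPENAI_API_KEY, etc.).
if __name__ == "__main__":
    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()
    app.include_router(chat_router)
    uvicorn.run(app, host="0.0.0.0", port=5050)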
| [
"langchain.embeddings.ollama.OllamaEmbeddings",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((1158, 1178), 'logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1168, 1178), False, 'from logger import get_logger\n'), ((1194, 1205), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1203, 1205), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((1230, 1251), 'modules.notification.service.notification_service.NotificationService', 'NotificationService', ([], {}), '()\n', (1249, 1251), False, 'from modules.notification.service.notification_service import NotificationService\n'), ((1268, 1282), 'modules.brain.service.brain_service.BrainService', 'BrainService', ([], {}), '()\n', (1280, 1282), False, 'from modules.brain.service.brain_service import BrainService\n'), ((1298, 1311), 'modules.chat.service.chat_service.ChatService', 'ChatService', ([], {}), '()\n', (1309, 1311), False, 'from modules.chat.service.chat_service import ChatService\n'), ((1450, 1465), 'models.settings.BrainSettings', 'BrainSettings', ([], {}), '()\n', (1463, 1465), False, 'from models.settings import BrainSettings, get_supabase_client\n'), ((1488, 1509), 'models.settings.get_supabase_client', 'get_supabase_client', ([], {}), '()\n', (1507, 1509), False, 'from models.settings import BrainSettings, get_supabase_client\n'), ((1792, 1889), 'vectorstore.supabase.CustomSupabaseVectorStore', 'CustomSupabaseVectorStore', (['supabase_client', 'embeddings'], {'table_name': '"""vectors"""', 'user_id': 'user_id'}), "(supabase_client, embeddings, table_name='vectors',\n user_id=user_id)\n", (1817, 1889), False, 'from vectorstore.supabase import CustomSupabaseVectorStore\n'), ((2080, 2094), 'modules.chat.controller.chat.brainful_chat.BrainfulChat', 'BrainfulChat', ([], {}), '()\n', (2092, 2094), False, 'from modules.chat.controller.chat.brainful_chat import BrainfulChat\n'), ((2198, 2253), 'models.user_usage.UserUsage', 'UserUsage', ([], {'id': 'current_user.id', 'email': 'current_user.email'}), '(id=current_user.id, email=current_user.email)\n', (2207, 2253), False, 'from models.user_usage import UserUsage\n'), ((2593, 2654), 'packages.utils.telemetry.send_telemetry', 'send_telemetry', (['"""question_asked"""', "{'model_name': brain.model}"], {}), "('question_asked', {'model_name': brain.model})\n", (2607, 2654), False, 'from packages.utils.telemetry import send_telemetry\n'), ((3248, 3273), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (3255, 3273), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((4355, 4380), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (4362, 4380), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5080, 5105), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (5087, 5105), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5636, 5661), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (5643, 5661), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6531, 6556), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (6538, 6556), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6603, 6617), 'modules.chat.controller.chat.brainful_chat.BrainfulChat', 'BrainfulChat', ([], {}), '()\n', (6615, 6617), False, 'from modules.chat.controller.chat.brainful_chat import BrainfulChat\n'), ((6721, 6776), 
'models.user_usage.UserUsage', 'UserUsage', ([], {'id': 'current_user.id', 'email': 'current_user.email'}), '(id=current_user.id, email=current_user.email)\n', (6730, 6776), False, 'from models.user_usage import UserUsage\n'), ((1596, 1657), 'langchain.embeddings.ollama.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'base_url': 'brain_settings.ollama_api_base_url'}), '(base_url=brain_settings.ollama_api_base_url)\n', (1612, 1657), False, 'from langchain.embeddings.ollama import OllamaEmbeddings\n'), ((1754, 1772), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1770, 1772), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((4593, 4688), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(403)', 'detail': '"""You should be the owner of the chat to update it."""'}), "(status_code=403, detail=\n 'You should be the owner of the chat to update it.')\n", (4606, 4688), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((3168, 3180), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (3178, 3180), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((3797, 3809), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (3807, 3809), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((4191, 4203), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (4201, 4203), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((4948, 4960), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (4958, 4960), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((5586, 5593), 'fastapi.Query', 'Query', ([], {}), '()\n', (5591, 5593), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5387, 5399), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (5397, 5399), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((6481, 6488), 'fastapi.Query', 'Query', ([], {}), '()\n', (6486, 6488), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6275, 6287), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (6285, 6287), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((7390, 7402), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (7400, 7402), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((7688, 7700), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (7698, 7700), False, 'from middlewares.auth import AuthBearer, get_current_user\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This would makes sure Python is aware there is more than one sub-package within bigdl,
# physically located elsewhere.
# Otherwise there would be module not found error in non-pip's setting as Python would
# only search the first bigdl package and end up finding only one sub-package.
# This file is adapted from
# https://github.com/hwchase17/langchain/blob/master/langchain/llms/huggingface_pipeline.py
# The MIT License
# Copyright (c) Harrison Chase
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import importlib.util
import logging
from typing import Any, List, Mapping, Optional
from pydantic import Extra
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class TransformersPipelineLLM(LLM):
"""Wrapper around the BigDL-LLM Transformer-INT4 model in Transformer.pipeline()
Example:
.. code-block:: python
from bigdl.llm.langchain.llms import TransformersPipelineLLM
            llm = TransformersPipelineLLM.from_model_id(model_id="decapoda-research/llama-7b-hf", task="text-generation")
"""
pipeline: Any #: :meta private:
model_id: str = DEFAULT_MODEL_ID
"""Model name or model path to use."""
model_kwargs: Optional[dict] = None
"""Key word arguments passed to the model."""
pipeline_kwargs: Optional[dict] = None
"""Key word arguments passed to the pipeline."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@classmethod
def from_model_id(
cls,
model_id: str,
task: str,
model_kwargs: Optional[dict] = None,
pipeline_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> LLM:
"""Construct the pipeline object from model_id and task."""
try:
from bigdl.llm.transformers import (
AutoModel,
AutoModelForCausalLM,
# AutoModelForSeq2SeqLM,
)
from transformers import AutoTokenizer, LlamaTokenizer
from transformers import pipeline as hf_pipeline
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"Please install it with `pip install transformers`."
)
_model_kwargs = model_kwargs or {}
        # TODO: may refactor this code in the future
try:
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
except:
tokenizer = LlamaTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
# TODO: support this when related PR merged
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if "trust_remote_code" in _model_kwargs:
_model_kwargs = {
k: v for k, v in _model_kwargs.items() if k != "trust_remote_code"
}
_pipeline_kwargs = pipeline_kwargs or {}
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device='cpu', # only cpu now
model_kwargs=_model_kwargs,
**_pipeline_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return cls(
pipeline=pipeline,
model_id=model_id,
model_kwargs=_model_kwargs,
pipeline_kwargs=_pipeline_kwargs,
**kwargs,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model_id": self.model_id,
"model_kwargs": self.model_kwargs,
"pipeline_kwargs": self.pipeline_kwargs,
}
@property
def _llm_type(self) -> str:
return "BigDL-llm"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
response = self.pipeline(prompt)
if self.pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif self.pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif self.pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
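# Usage sketch (illustrative, not part of the original module). The model id below is
# an assumption taken from the class docstring; any checkpoint matching the chosen task
# should work, provided bigdl-llm and transformers are installed.
if __name__ == "__main__":
    demo_llm = TransformersPipelineLLM.from_model_id(
        model_id="decapoda-research/llama-7b-hf",
        task="text-generation",
    )
    print(demo_llm("What is AI?"))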
| [
"langchain.llms.utils.enforce_stop_tokens"
] | [((5354, 5476), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': '"""cpu"""', 'model_kwargs': '_model_kwargs'}), "(task=task, model=model, tokenizer=tokenizer, device='cpu',\n model_kwargs=_model_kwargs, **_pipeline_kwargs)\n", (5365, 5476), True, 'from transformers import pipeline as hf_pipeline\n'), ((4206, 4262), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4235, 4262), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((7329, 7360), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (7348, 7360), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((4303, 4360), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4333, 4360), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((4441, 4528), 'bigdl.llm.transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'load_in_4bit': '(True)'}), '(model_id, load_in_4bit=True, **\n _model_kwargs)\n', (4477, 4528), False, 'from bigdl.llm.transformers import AutoModel, AutoModelForCausalLM\n')] |
import logging
from typing import AsyncGenerator, Optional, Tuple
from pydantic.v1 import SecretStr
from langchain import ConversationChain
from langchain.schema import ChatMessage, AIMessage, HumanMessage
from langchain.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    HumanMessagePromptTemplate,
)
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_models import ChatAnthropic
from vocode import getenv
from vocode.streaming.agent.base_agent import RespondAgent
from vocode.streaming.agent.utils import get_sentence_from_buffer
from vocode.streaming.models.agent import ChatAnthropicAgentConfig
SENTENCE_ENDINGS = [".", "!", "?"]
class ChatAnthropicAgent(RespondAgent[ChatAnthropicAgentConfig]):
def __init__(
self,
agent_config: ChatAnthropicAgentConfig,
logger: Optional[logging.Logger] = None,
anthropic_api_key: Optional[SecretStr] = None,
):
super().__init__(agent_config=agent_config, logger=logger)
import anthropic
# Convert anthropic_api_key to SecretStr if it's not None and not already a SecretStr
if anthropic_api_key is not None and not isinstance(
anthropic_api_key, SecretStr
):
anthropic_api_key = SecretStr(anthropic_api_key)
        elif anthropic_api_key is None:
            # Retrieve anthropic_api_key from the environment and convert to SecretStr
env_key = getenv("ANTHROPIC_API_KEY")
if env_key:
anthropic_api_key = SecretStr(env_key)
if not anthropic_api_key:
raise ValueError(
"ANTHROPIC_API_KEY must be set in environment or passed in as a SecretStr"
)
self.prompt = ChatPromptTemplate.from_messages(
[
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("{input}"),
]
)
self.llm = ChatAnthropic(
model_name=agent_config.model_name,
anthropic_api_key=anthropic_api_key,
)
# streaming not well supported by langchain, so we will connect directly
self.anthropic_client = (
anthropic.AsyncAnthropic(api_key=str(anthropic_api_key))
if agent_config.generate_responses
else None
)
self.memory = ConversationBufferMemory(return_messages=True)
self.memory.chat_memory.messages.append(
HumanMessage(content=self.agent_config.prompt_preamble)
)
if agent_config.initial_message:
self.memory.chat_memory.messages.append(
AIMessage(content=agent_config.initial_message.text)
)
self.conversation = ConversationChain(
memory=self.memory, prompt=self.prompt, llm=self.llm
)
async def respond(
self,
human_input,
conversation_id: str,
is_interrupt: bool = False,
) -> Tuple[str, bool]:
text = await self.conversation.apredict(input=human_input)
self.logger.debug(f"LLM response: {text}")
return text, False
async def generate_response(
self,
human_input,
conversation_id: str,
is_interrupt: bool = False,
) -> AsyncGenerator[Tuple[str, bool], None]:
self.memory.chat_memory.messages.append(HumanMessage(content=human_input))
bot_memory_message = AIMessage(content="")
self.memory.chat_memory.messages.append(bot_memory_message)
prompt = self.llm._convert_messages_to_prompt(self.memory.chat_memory.messages)
if self.anthropic_client:
streamed_response = await self.anthropic_client.completions.create(
prompt=prompt,
max_tokens_to_sample=self.agent_config.max_tokens_to_sample,
model=self.agent_config.model_name,
stream=True,
)
buffer = ""
async for completion in streamed_response:
buffer += completion.completion
sentence, remainder = get_sentence_from_buffer(buffer)
if sentence:
bot_memory_message.content = bot_memory_message.content + sentence
buffer = remainder
yield sentence, True
continue
def update_last_bot_message_on_cut_off(self, message: str):
for memory_message in self.memory.chat_memory.messages[::-1]:
if (
isinstance(memory_message, ChatMessage)
and memory_message.role == "assistant"
) or isinstance(memory_message, AIMessage):
memory_message.content = message
return
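# Usage sketch (illustrative, not part of the original module). The config fields are
# inferred from how agent_config is read above (prompt_preamble, model_name,
# generate_responses, ...); the API key below is a placeholder, not a real secret.
async def _demo_respond() -> None:
    agent = ChatAnthropicAgent(
        agent_config=ChatAnthropicAgentConfig(
            prompt_preamble="You are a concise assistant.",
        ),
        anthropic_api_key=SecretStr("sk-ant-placeholder"),
    )
    reply, _ = await agent.respond("Hello there!", conversation_id="demo")
    print(reply)
# To try it: import asyncio; asyncio.run(_demo_respond())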
| [
"langchain.schema.AIMessage",
"langchain.ConversationChain",
"langchain.schema.HumanMessage",
"langchain_community.chat_models.ChatAnthropic",
"langchain.prompts.MessagesPlaceholder",
"langchain.memory.ConversationBufferMemory",
"langchain.prompts.HumanMessagePromptTemplate.from_template"
] | [((2147, 2238), 'langchain_community.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model_name': 'agent_config.model_name', 'anthropic_api_key': 'anthropic_api_key'}), '(model_name=agent_config.model_name, anthropic_api_key=\n anthropic_api_key)\n', (2160, 2238), False, 'from langchain_community.chat_models import ChatAnthropic\n'), ((2556, 2602), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)'}), '(return_messages=True)\n', (2580, 2602), False, 'from langchain.memory import ConversationBufferMemory\n'), ((2936, 3007), 'langchain.ConversationChain', 'ConversationChain', ([], {'memory': 'self.memory', 'prompt': 'self.prompt', 'llm': 'self.llm'}), '(memory=self.memory, prompt=self.prompt, llm=self.llm)\n', (2953, 3007), False, 'from langchain import ConversationChain\n'), ((3624, 3645), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '""""""'}), "(content='')\n", (3633, 3645), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1468, 1496), 'pydantic.v1.SecretStr', 'SecretStr', (['anthropic_api_key'], {}), '(anthropic_api_key)\n', (1477, 1496), False, 'from pydantic.v1 import SecretStr\n'), ((1625, 1652), 'vocode.getenv', 'getenv', (['"""ANTHROPIC_API_KEY"""'], {}), "('ANTHROPIC_API_KEY')\n", (1631, 1652), False, 'from vocode import getenv\n'), ((2664, 2719), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'self.agent_config.prompt_preamble'}), '(content=self.agent_config.prompt_preamble)\n', (2676, 2719), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((3559, 3592), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_input'}), '(content=human_input)\n', (3571, 3592), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1713, 1731), 'pydantic.v1.SecretStr', 'SecretStr', (['env_key'], {}), '(env_key)\n', (1722, 1731), False, 'from pydantic.v1 import SecretStr\n'), ((1988, 2032), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (2007, 2032), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2050, 2101), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{input}"""'], {}), "('{input}')\n", (2090, 2101), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2840, 2892), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'agent_config.initial_message.text'}), '(content=agent_config.initial_message.text)\n', (2849, 2892), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((4286, 4318), 'vocode.streaming.agent.utils.get_sentence_from_buffer', 'get_sentence_from_buffer', (['buffer'], {}), '(buffer)\n', (4310, 4318), False, 'from vocode.streaming.agent.utils import get_sentence_from_buffer\n')] |
import os
from dotenv import load_dotenv, find_dotenv
from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
import textwrap
# --------------------------------------------------------------
# Load the HuggingFaceHub API token from the .env file
# --------------------------------------------------------------
load_dotenv(find_dotenv())
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
# --------------------------------------------------------------
# Load the LLM model from the HuggingFaceHub
# --------------------------------------------------------------
repo_id = "tiiuae/falcon-7b-instruct" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
falcon_llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
)
# --------------------------------------------------------------
# Create a PromptTemplate and LLMChain
# --------------------------------------------------------------
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=falcon_llm)
# --------------------------------------------------------------
# Run the LLMChain
# --------------------------------------------------------------
question = "How do I make a sandwich?"
response = llm_chain.run(question)
wrapped_text = textwrap.fill(
response, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# --------------------------------------------------------------
# Load a video transcript from YouTube
# --------------------------------------------------------------
video_url = "https://www.youtube.com/watch?v=riXpu1tHzl0"
loader = YoutubeLoader.from_youtube_url(video_url)
transcript = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000)
docs = text_splitter.split_documents(transcript)
# --------------------------------------------------------------
# Summarization with LangChain
# --------------------------------------------------------------
# Add map_prompt and combine_prompt to the chain for custom summarization
chain = load_summarize_chain(falcon_llm, chain_type="map_reduce", verbose=True)
print(chain.llm_chain.prompt.template)
print(chain.combine_document_chain.llm_chain.prompt.template)
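# --------------------------------------------------------------
# Optional: custom map/combine prompts (illustrative sketch)
# --------------------------------------------------------------
# The prompt wording below is an assumption, not part of the original script; for
# map_reduce chains, load_summarize_chain accepts map_prompt and combine_prompt.
custom_map_prompt = PromptTemplate(
    template="Summarize this part of the transcript:\n\n{text}\n\nCONCISE SUMMARY:",
    input_variables=["text"],
)
custom_combine_prompt = PromptTemplate(
    template="Combine the partial summaries below into one concise summary:\n\n{text}\n\nSUMMARY:",
    input_variables=["text"],
)
custom_chain = load_summarize_chain(
    falcon_llm,
    chain_type="map_reduce",
    map_prompt=custom_map_prompt,
    combine_prompt=custom_combine_prompt,
    verbose=True,
)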
# --------------------------------------------------------------
# Test the Falcon model with text summarization
# --------------------------------------------------------------
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# --------------------------------------------------------------
# Load an OpenAI model for comparison
# --------------------------------------------------------------
openai_llm = OpenAI(
model_name="text-davinci-003", temperature=0.1, max_tokens=500
) # max token length is 4097
chain = load_summarize_chain(openai_llm, chain_type="map_reduce", verbose=True)
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
| [
"langchain.PromptTemplate",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.YoutubeLoader.from_youtube_url",
"langchain.chains.summarize.load_summarize_chain",
"langchain.LLMChain",
"langchain.OpenAI",
"langchain.HuggingFaceHub"
] | [((955, 1048), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'max_new_tokens': 500}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'max_new_tokens': 500})\n", (969, 1048), False, 'from langchain import HuggingFaceHub\n'), ((1305, 1368), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['question']"}), "(template=template, input_variables=['question'])\n", (1319, 1368), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1381, 1420), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'falcon_llm'}), '(prompt=prompt, llm=falcon_llm)\n', (1389, 1420), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1662, 1750), 'textwrap.fill', 'textwrap.fill', (['response'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(response, width=100, break_long_words=False,\n replace_whitespace=False)\n', (1675, 1750), False, 'import textwrap\n'), ((2012, 2053), 'langchain.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['video_url'], {}), '(video_url)\n', (2042, 2053), False, 'from langchain.document_loaders import YoutubeLoader\n'), ((2098, 2145), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(3000)'}), '(chunk_size=3000)\n', (2128, 2145), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2440, 2511), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['falcon_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(falcon_llm, chain_type='map_reduce', verbose=True)\n", (2460, 2511), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((2841, 2935), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (2854, 2935), False, 'import textwrap\n'), ((3142, 3212), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-003"""', 'temperature': '(0.1)', 'max_tokens': '(500)'}), "(model_name='text-davinci-003', temperature=0.1, max_tokens=500)\n", (3148, 3212), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((3255, 3326), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['openai_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(openai_llm, chain_type='map_reduce', verbose=True)\n", (3275, 3326), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((3375, 3469), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (3388, 3469), False, 'import textwrap\n'), ((541, 554), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (552, 554), False, 'from dotenv import load_dotenv, find_dotenv\n')] |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, Tuple
import dpath.util
from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel
from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier
from airbyte_cdk.models import AirbyteRecordMessage, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, DestinationSyncMode
from airbyte_cdk.utils.traced_exception import AirbyteTracedException, FailureType
from langchain.document_loaders.base import Document
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
from langchain.utils import stringify_dict
METADATA_STREAM_FIELD = "_ab_stream"
METADATA_RECORD_ID_FIELD = "_ab_record_id"
CDC_DELETED_FIELD = "_ab_cdc_deleted_at"
@dataclass
class Chunk:
page_content: Optional[str]
metadata: Dict[str, Any]
record: AirbyteRecordMessage
embedding: Optional[List[float]] = None
headers_to_split_on = ["(?:^|\n)# ", "(?:^|\n)## ", "(?:^|\n)### ", "(?:^|\n)#### ", "(?:^|\n)##### ", "(?:^|\n)###### "]
class DocumentProcessor:
"""
DocumentProcessor is a helper class that generates documents from Airbyte records.
It is used to generate documents from records before writing them to the destination:
* The text fields are extracted from the record and concatenated to a single string.
* The metadata fields are extracted from the record and added to the document metadata.
* The document is split into chunks of a given size using a langchain text splitter.
The Writer class uses the DocumentProcessor class to internally generate documents from records - in most cases you don't need to use it directly,
except if you want to implement a custom writer.
    The config parameters specified by the ProcessingConfigModel have to be made part of the connector spec to allow the user to configure the document processor.
Calling DocumentProcessor.check_config(config) will validate the config and return an error message if the config is invalid.
"""
streams: Mapping[str, ConfiguredAirbyteStream]
@staticmethod
def check_config(config: ProcessingConfigModel) -> Optional[str]:
if config.text_splitter is not None and config.text_splitter.mode == "separator":
for s in config.text_splitter.separators:
try:
separator = json.loads(s)
if not isinstance(separator, str):
return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
except json.decoder.JSONDecodeError:
return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
return None
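    # Illustration (assumption, added for clarity): each configured separator must be a
    # JSON-encoded string such as '"\n\n"', '"."', or '"---"', so that the json.loads
    # calls above and in _get_text_splitter yield plain Python strings.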
def _get_text_splitter(
self, chunk_size: int, chunk_overlap: int, splitter_config: Optional[TextSplitterConfigModel]
) -> RecursiveCharacterTextSplitter:
if splitter_config is None:
splitter_config = SeparatorSplitterConfigModel(mode="separator")
if splitter_config.mode == "separator":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=[json.loads(s) for s in splitter_config.separators],
keep_separator=splitter_config.keep_separator,
disallowed_special=(),
)
if splitter_config.mode == "markdown":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=headers_to_split_on[: splitter_config.split_level],
is_separator_regex=True,
keep_separator=True,
disallowed_special=(),
)
if splitter_config.mode == "code":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=RecursiveCharacterTextSplitter.get_separators_for_language(Language(splitter_config.language)),
disallowed_special=(),
)
def __init__(self, config: ProcessingConfigModel, catalog: ConfiguredAirbyteCatalog):
self.streams = {create_stream_identifier(stream.stream): stream for stream in catalog.streams}
self.splitter = self._get_text_splitter(config.chunk_size, config.chunk_overlap, config.text_splitter)
self.text_fields = config.text_fields
self.metadata_fields = config.metadata_fields
self.field_name_mappings = config.field_name_mappings
self.logger = logging.getLogger("airbyte.document_processor")
def process(self, record: AirbyteRecordMessage) -> Tuple[List[Chunk], Optional[str]]:
"""
Generate documents from records.
:param records: List of AirbyteRecordMessages
:return: Tuple of (List of document chunks, record id to delete if a stream is in dedup mode to avoid stale documents in the vector store)
"""
if CDC_DELETED_FIELD in record.data and record.data[CDC_DELETED_FIELD]:
return [], self._extract_primary_key(record)
doc = self._generate_document(record)
if doc is None:
text_fields = ", ".join(self.text_fields) if self.text_fields else "all fields"
raise AirbyteTracedException(
internal_message="No text fields found in record",
message=f"Record {str(record.data)[:250]}... does not contain any of the configured text fields: {text_fields}. Please check your processing configuration, there has to be at least one text field set in each record.",
failure_type=FailureType.config_error,
)
chunks = [
Chunk(page_content=chunk_document.page_content, metadata=chunk_document.metadata, record=record)
for chunk_document in self._split_document(doc)
]
id_to_delete = doc.metadata[METADATA_RECORD_ID_FIELD] if METADATA_RECORD_ID_FIELD in doc.metadata else None
return chunks, id_to_delete
def _generate_document(self, record: AirbyteRecordMessage) -> Optional[Document]:
relevant_fields = self._extract_relevant_fields(record, self.text_fields)
if len(relevant_fields) == 0:
return None
text = stringify_dict(relevant_fields)
metadata = self._extract_metadata(record)
return Document(page_content=text, metadata=metadata)
def _extract_relevant_fields(self, record: AirbyteRecordMessage, fields: Optional[List[str]]) -> Dict[str, Any]:
relevant_fields = {}
if fields and len(fields) > 0:
for field in fields:
values = dpath.util.values(record.data, field, separator=".")
if values and len(values) > 0:
relevant_fields[field] = values if len(values) > 1 else values[0]
else:
relevant_fields = record.data
return self._remap_field_names(relevant_fields)
def _extract_metadata(self, record: AirbyteRecordMessage) -> Dict[str, Any]:
metadata = self._extract_relevant_fields(record, self.metadata_fields)
metadata[METADATA_STREAM_FIELD] = create_stream_identifier(record)
primary_key = self._extract_primary_key(record)
if primary_key:
metadata[METADATA_RECORD_ID_FIELD] = primary_key
return metadata
def _extract_primary_key(self, record: AirbyteRecordMessage) -> Optional[str]:
stream_identifier = create_stream_identifier(record)
current_stream: ConfiguredAirbyteStream = self.streams[stream_identifier]
# if the sync mode is deduping, use the primary key to upsert existing records instead of appending new ones
if not current_stream.primary_key or current_stream.destination_sync_mode != DestinationSyncMode.append_dedup:
return None
primary_key = []
for key in current_stream.primary_key:
try:
primary_key.append(str(dpath.util.get(record.data, key)))
except KeyError:
primary_key.append("__not_found__")
stringified_primary_key = "_".join(primary_key)
return f"{stream_identifier}_{stringified_primary_key}"
def _split_document(self, doc: Document) -> List[Document]:
chunks: List[Document] = self.splitter.split_documents([doc])
return chunks
def _remap_field_names(self, fields: Dict[str, Any]) -> Dict[str, Any]:
if not self.field_name_mappings:
return fields
new_fields = fields.copy()
for mapping in self.field_name_mappings:
if mapping.from_field in new_fields:
new_fields[mapping.to_field] = new_fields.pop(mapping.from_field)
return new_fields
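# Usage sketch (illustrative, not part of the original module). Field names and the
# record shape below are assumptions; the catalog must come from the configured
# Airbyte connection and contain the "articles" stream referenced here.
def _demo_process(catalog: ConfiguredAirbyteCatalog) -> None:
    config = ProcessingConfigModel(
        chunk_size=1000,
        chunk_overlap=0,
        text_fields=["body"],
        metadata_fields=["id"],
    )
    error = DocumentProcessor.check_config(config)
    if error is not None:
        raise ValueError(error)
    processor = DocumentProcessor(config, catalog)
    record = AirbyteRecordMessage(
        stream="articles",
        data={"id": 1, "body": "Some long article text ..."},
        emitted_at=0,
    )
    chunks, id_to_delete = processor.process(record)
    print(len(chunks), id_to_delete)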
| [
"langchain.utils.stringify_dict",
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.document_loaders.base.Document",
"langchain.text_splitter.Language"
] | [((4888, 4935), 'logging.getLogger', 'logging.getLogger', (['"""airbyte.document_processor"""'], {}), "('airbyte.document_processor')\n", (4905, 4935), False, 'import logging\n'), ((6600, 6631), 'langchain.utils.stringify_dict', 'stringify_dict', (['relevant_fields'], {}), '(relevant_fields)\n', (6614, 6631), False, 'from langchain.utils import stringify_dict\n'), ((6697, 6743), 'langchain.document_loaders.base.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (6705, 6743), False, 'from langchain.document_loaders.base import Document\n'), ((7489, 7521), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7513, 7521), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((7799, 7831), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7823, 7831), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((3160, 3206), 'airbyte_cdk.destinations.vector_db_based.config.SeparatorSplitterConfigModel', 'SeparatorSplitterConfigModel', ([], {'mode': '"""separator"""'}), "(mode='separator')\n", (3188, 3206), False, 'from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel\n'), ((3674, 3923), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separators': 'headers_to_split_on[:splitter_config.split_level]', 'is_separator_regex': '(True)', 'keep_separator': '(True)', 'disallowed_special': '()'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, separators=headers_to_split_on[:\n splitter_config.split_level], is_separator_regex=True, keep_separator=\n True, disallowed_special=())\n', (3726, 3923), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n'), ((4513, 4552), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['stream.stream'], {}), '(stream.stream)\n', (4537, 4552), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((2542, 2555), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2552, 2555), False, 'import json\n'), ((3440, 3453), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (3450, 3453), False, 'import json\n'), ((4308, 4342), 'langchain.text_splitter.Language', 'Language', (['splitter_config.language'], {}), '(splitter_config.language)\n', (4316, 4342), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n')] |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, Tuple
import dpath.util
from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel
from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier
from airbyte_cdk.models import AirbyteRecordMessage, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, DestinationSyncMode
from airbyte_cdk.utils.traced_exception import AirbyteTracedException, FailureType
from langchain.document_loaders.base import Document
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
from langchain.utils import stringify_dict
METADATA_STREAM_FIELD = "_ab_stream"
METADATA_RECORD_ID_FIELD = "_ab_record_id"
CDC_DELETED_FIELD = "_ab_cdc_deleted_at"
@dataclass
class Chunk:
page_content: Optional[str]
metadata: Dict[str, Any]
record: AirbyteRecordMessage
embedding: Optional[List[float]] = None
headers_to_split_on = ["(?:^|\n)# ", "(?:^|\n)## ", "(?:^|\n)### ", "(?:^|\n)#### ", "(?:^|\n)##### ", "(?:^|\n)###### "]
class DocumentProcessor:
"""
DocumentProcessor is a helper class that generates documents from Airbyte records.
It is used to generate documents from records before writing them to the destination:
* The text fields are extracted from the record and concatenated to a single string.
* The metadata fields are extracted from the record and added to the document metadata.
* The document is split into chunks of a given size using a langchain text splitter.
The Writer class uses the DocumentProcessor class to internally generate documents from records - in most cases you don't need to use it directly,
except if you want to implement a custom writer.
    The config parameters specified by the ProcessingConfigModel have to be made part of the connector spec to allow the user to configure the document processor.
Calling DocumentProcessor.check_config(config) will validate the config and return an error message if the config is invalid.
"""
streams: Mapping[str, ConfiguredAirbyteStream]
@staticmethod
def check_config(config: ProcessingConfigModel) -> Optional[str]:
if config.text_splitter is not None and config.text_splitter.mode == "separator":
for s in config.text_splitter.separators:
try:
separator = json.loads(s)
if not isinstance(separator, str):
return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
except json.decoder.JSONDecodeError:
return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes."
return None
def _get_text_splitter(
self, chunk_size: int, chunk_overlap: int, splitter_config: Optional[TextSplitterConfigModel]
) -> RecursiveCharacterTextSplitter:
if splitter_config is None:
splitter_config = SeparatorSplitterConfigModel(mode="separator")
if splitter_config.mode == "separator":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=[json.loads(s) for s in splitter_config.separators],
keep_separator=splitter_config.keep_separator,
disallowed_special=(),
)
if splitter_config.mode == "markdown":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=headers_to_split_on[: splitter_config.split_level],
is_separator_regex=True,
keep_separator=True,
disallowed_special=(),
)
if splitter_config.mode == "code":
return RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=RecursiveCharacterTextSplitter.get_separators_for_language(Language(splitter_config.language)),
disallowed_special=(),
)
def __init__(self, config: ProcessingConfigModel, catalog: ConfiguredAirbyteCatalog):
self.streams = {create_stream_identifier(stream.stream): stream for stream in catalog.streams}
self.splitter = self._get_text_splitter(config.chunk_size, config.chunk_overlap, config.text_splitter)
self.text_fields = config.text_fields
self.metadata_fields = config.metadata_fields
self.field_name_mappings = config.field_name_mappings
self.logger = logging.getLogger("airbyte.document_processor")
def process(self, record: AirbyteRecordMessage) -> Tuple[List[Chunk], Optional[str]]:
"""
Generate documents from records.
:param records: List of AirbyteRecordMessages
:return: Tuple of (List of document chunks, record id to delete if a stream is in dedup mode to avoid stale documents in the vector store)
"""
if CDC_DELETED_FIELD in record.data and record.data[CDC_DELETED_FIELD]:
return [], self._extract_primary_key(record)
doc = self._generate_document(record)
if doc is None:
text_fields = ", ".join(self.text_fields) if self.text_fields else "all fields"
raise AirbyteTracedException(
internal_message="No text fields found in record",
message=f"Record {str(record.data)[:250]}... does not contain any of the configured text fields: {text_fields}. Please check your processing configuration, there has to be at least one text field set in each record.",
failure_type=FailureType.config_error,
)
chunks = [
Chunk(page_content=chunk_document.page_content, metadata=chunk_document.metadata, record=record)
for chunk_document in self._split_document(doc)
]
id_to_delete = doc.metadata[METADATA_RECORD_ID_FIELD] if METADATA_RECORD_ID_FIELD in doc.metadata else None
return chunks, id_to_delete
def _generate_document(self, record: AirbyteRecordMessage) -> Optional[Document]:
relevant_fields = self._extract_relevant_fields(record, self.text_fields)
if len(relevant_fields) == 0:
return None
text = stringify_dict(relevant_fields)
metadata = self._extract_metadata(record)
return Document(page_content=text, metadata=metadata)
def _extract_relevant_fields(self, record: AirbyteRecordMessage, fields: Optional[List[str]]) -> Dict[str, Any]:
relevant_fields = {}
if fields and len(fields) > 0:
for field in fields:
values = dpath.util.values(record.data, field, separator=".")
if values and len(values) > 0:
relevant_fields[field] = values if len(values) > 1 else values[0]
else:
relevant_fields = record.data
return self._remap_field_names(relevant_fields)
def _extract_metadata(self, record: AirbyteRecordMessage) -> Dict[str, Any]:
metadata = self._extract_relevant_fields(record, self.metadata_fields)
metadata[METADATA_STREAM_FIELD] = create_stream_identifier(record)
primary_key = self._extract_primary_key(record)
if primary_key:
metadata[METADATA_RECORD_ID_FIELD] = primary_key
return metadata
def _extract_primary_key(self, record: AirbyteRecordMessage) -> Optional[str]:
stream_identifier = create_stream_identifier(record)
current_stream: ConfiguredAirbyteStream = self.streams[stream_identifier]
# if the sync mode is deduping, use the primary key to upsert existing records instead of appending new ones
if not current_stream.primary_key or current_stream.destination_sync_mode != DestinationSyncMode.append_dedup:
return None
primary_key = []
for key in current_stream.primary_key:
try:
primary_key.append(str(dpath.util.get(record.data, key)))
except KeyError:
primary_key.append("__not_found__")
stringified_primary_key = "_".join(primary_key)
return f"{stream_identifier}_{stringified_primary_key}"
def _split_document(self, doc: Document) -> List[Document]:
chunks: List[Document] = self.splitter.split_documents([doc])
return chunks
def _remap_field_names(self, fields: Dict[str, Any]) -> Dict[str, Any]:
if not self.field_name_mappings:
return fields
new_fields = fields.copy()
for mapping in self.field_name_mappings:
if mapping.from_field in new_fields:
new_fields[mapping.to_field] = new_fields.pop(mapping.from_field)
return new_fields
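# Illustrative usage sketch (not part of the original file); the config, catalog and record objects are
# assumed to come from the surrounding Airbyte destination code:
#   processor = DocumentProcessor(config=processing_config, catalog=configured_catalog)
#   chunks, id_to_delete = processor.process(record)  # record: AirbyteRecordMessage
#   # each Chunk carries page_content, metadata and the originating record for embedding/writing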
| [
"langchain.utils.stringify_dict",
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.document_loaders.base.Document",
"langchain.text_splitter.Language"
] | [((4888, 4935), 'logging.getLogger', 'logging.getLogger', (['"""airbyte.document_processor"""'], {}), "('airbyte.document_processor')\n", (4905, 4935), False, 'import logging\n'), ((6600, 6631), 'langchain.utils.stringify_dict', 'stringify_dict', (['relevant_fields'], {}), '(relevant_fields)\n', (6614, 6631), False, 'from langchain.utils import stringify_dict\n'), ((6697, 6743), 'langchain.document_loaders.base.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (6705, 6743), False, 'from langchain.document_loaders.base import Document\n'), ((7489, 7521), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7513, 7521), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((7799, 7831), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7823, 7831), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((3160, 3206), 'airbyte_cdk.destinations.vector_db_based.config.SeparatorSplitterConfigModel', 'SeparatorSplitterConfigModel', ([], {'mode': '"""separator"""'}), "(mode='separator')\n", (3188, 3206), False, 'from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel\n'), ((3674, 3923), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separators': 'headers_to_split_on[:splitter_config.split_level]', 'is_separator_regex': '(True)', 'keep_separator': '(True)', 'disallowed_special': '()'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, separators=headers_to_split_on[:\n splitter_config.split_level], is_separator_regex=True, keep_separator=\n True, disallowed_special=())\n', (3726, 3923), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n'), ((4513, 4552), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['stream.stream'], {}), '(stream.stream)\n', (4537, 4552), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((2542, 2555), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2552, 2555), False, 'import json\n'), ((3440, 3453), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (3450, 3453), False, 'import json\n'), ((4308, 4342), 'langchain.text_splitter.Language', 'Language', (['splitter_config.language'], {}), '(splitter_config.language)\n', (4316, 4342), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n')] |
from waifu.llm.Brain import Brain
from waifu.llm.VectorDB import VectorDB
from waifu.llm.SentenceTransformer import STEmbedding
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from typing import Any, List, Mapping, Optional
from langchain.schema import BaseMessage
import openai
class GPT(Brain):
def __init__(self, api_key: str,
name: str,
stream: bool=False,
callback=None,
model: str='gpt-3.5-turbo',
proxy: str=''):
self.llm = ChatOpenAI(openai_api_key=api_key,
model_name=model,
streaming=stream,
                    callbacks=[callback] if callback is not None else None,  # avoid passing [None] when no callback is supplied
temperature=0.85)
self.llm_nonstream = ChatOpenAI(openai_api_key=api_key, model_name=model)
self.embedding = OpenAIEmbeddings(openai_api_key=api_key)
# self.embedding = STEmbedding()
self.vectordb = VectorDB(self.embedding, f'./memory/{name}.csv')
if proxy != '':
openai.proxy = proxy
def think(self, messages: List[BaseMessage]):
return self.llm(messages).content
def think_nonstream(self, messages: List[BaseMessage]):
return self.llm_nonstream(messages).content
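    # Illustrative sketch (not part of the original class): `think` expects a list of LangChain chat
    # messages; the values below are placeholders:
    #   from langchain.schema import SystemMessage, HumanMessage
    #   brain = GPT(api_key="sk-...", name="waifu")
    #   reply = brain.think([SystemMessage(content="You are a cheerful companion."),
    #                        HumanMessage(content="Hello!")])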
def store_memory(self, text: str | list):
        '''Store memories as embeddings.'''
self.vectordb.store(text)
def extract_memory(self, text: str, top_n: int = 10):
        '''Retrieve the top_n most relevant memories.'''
return self.vectordb.query(text, top_n) | [
"langchain.embeddings.OpenAIEmbeddings",
"langchain.chat_models.ChatOpenAI"
] | [((576, 690), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model', 'streaming': 'stream', 'callbacks': '[callback]', 'temperature': '(0.85)'}), '(openai_api_key=api_key, model_name=model, streaming=stream,\n callbacks=[callback], temperature=0.85)\n', (586, 690), False, 'from langchain.chat_models import ChatOpenAI\n'), ((812, 864), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model'}), '(openai_api_key=api_key, model_name=model)\n', (822, 864), False, 'from langchain.chat_models import ChatOpenAI\n'), ((890, 930), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'api_key'}), '(openai_api_key=api_key)\n', (906, 930), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((996, 1044), 'waifu.llm.VectorDB.VectorDB', 'VectorDB', (['self.embedding', 'f"""./memory/{name}.csv"""'], {}), "(self.embedding, f'./memory/{name}.csv')\n", (1004, 1044), False, 'from waifu.llm.VectorDB import VectorDB\n')] |
from time import sleep
import copy
import redis
import json
import pickle
import traceback
from flask import Response, request, stream_with_context
from typing import Dict, Union
import os
from langchain.schema import HumanMessage, SystemMessage
from backend.api.language_model import get_llm
from backend.main import app, message_id_register, message_pool, logger
from backend.utils.streaming import single_round_chat_with_agent_streaming
from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL
from backend.schemas import DEFAULT_USER_ID
from real_agents.adapters.llm import BaseLanguageModel
from real_agents.adapters.agent_helpers import AgentExecutor, Tool
from real_agents.adapters.callbacks.agent_streaming import \
AgentStreamingStdOutCallbackHandler
from real_agents.adapters.models import ChatOpenAI
from real_agents.adapters.memory import ConversationReActBufferMemory
from real_agents.adapters.data_model import DataModel, JsonDataModel
from real_agents.adapters.interactive_executor import initialize_webot_agent
from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor
r = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, db=0) # adjust host/port/db as needed
# webot and webot_status are stored in redis because the two global variables cannot be modified and accessed reliably across multiple processes
# FIXME: webot is currently stored without message_id or chat_id info, so it can only be used for one chat at a time
# FIXME: webot_status is stored with chat_id info; if the status is not reset after a message ends abnormally (e.g. it is interrupted), it will be wrongly reused for the next chat
def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor:
data = r.get(f'webot_{user_id}_{chat_id}')
if data is not None:
webot = pickle.loads(data)
else:
        # initialize a webot with None instruction if webot does not exist
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
return webot
def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ):
r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot))
def get_webot_status_from_redis(user_id: str, chat_id: str):
webot_status_json = r.get(f'webot_status_{user_id}_{chat_id}')
if webot_status_json is not None:
webot_status = json.loads(webot_status_json)
return webot_status
else:
return {}
def save_webot_status_to_redis(user_id: str, chat_id: str, webot_status: Dict):
r.set(f'webot_status_{user_id}_{chat_id}', json.dumps(webot_status))
def reset_webot(user_id: str, chat_id: str):
webot = WebBrowsingExecutor(None)
save_webot_to_redis(user_id, chat_id, webot)
def reset_webot_status(user_id: str, chat_id: str):
webot_status = {"webot_status": "idle", "url": None}
save_webot_status_to_redis(user_id, chat_id, webot_status)
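# Illustrative sketch (not part of the original file) of the redis key scheme used by the helpers above:
#   webot_<user_id>_<chat_id>         -> pickled WebBrowsingExecutor
#   webot_status_<user_id>_<chat_id>  -> JSON status such as {"webot_status": "idle", "url": null}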
# this function has been deprecated
def get_plan(instruction: str, start_url: str, chat_llm: ChatOpenAI):
# fixme: Move this into a separate chain or executors to decompose the LLMs
system_message = f"""
You are a planner to assist another browser automation assistant.
Here is the instruction for the other assistant:
```
You MUST take one of the following actions. NEVER EVER EVER make up actions that do not exist:
1. click(element): Clicks on an element
2. setValue(element, value: string): Focuses on and sets the value of an input element
3. finish(): Indicates the task is finished
4. fail(): Indicates that you are unable to complete the task
You will be given a task to perform and the current state of the DOM. You will also be given previous actions that you have taken. You may retry a failed action up to one time.
This is an example of an action:
<Thought>I should click the add to cart button</Thought>
<Action>click(223)</Action>
You MUST always include the <Thought> and <Action> open/close tags or else your response will be marked as invalid.
Rules you MUST follow:
1. You must only take one step at a time. You cannot take multiple actions in a single response.
2. You should not consider the action to present the result to the user. You only need to do available actions. If info in current page is enough for the user to solve the problem, you should finish.
```
Now your responsibility is to give a step-by-step plan according to user's instruction. This plan will be given to the assistant as a reference when it is performing tasks.
""".strip()
human_message = f"""
The user requests the following task:
{instruction}
Now you are at {start_url}
Provide a plan to do this (you can use pseudo description as below to describe the item).
Here is an example case:
request: Go to google calendar to schedule a meeting
current url: "https://google.com"
example plan:
1. setValue(searchBar, "google calendar")
2. click(search)
3. click(the item with title of google calendar)
4.1 if user has logged in
do nothing
4.2 if user hasn't logged in
do login
5. click(create event button)
6. setValue(event title input bar, "meeting")
7. click(save event button)
8. finish()
""".strip()
messages = [SystemMessage(content=system_message),
HumanMessage(content=human_message)]
response = chat_llm(messages).content
return response
def create_webot_interaction_executor(
llm: BaseLanguageModel,
llm_name: str,
user_id: str,
chat_id: str
) -> AgentExecutor:
"""Creates an agent executor for interaction.
Args:
        llm: A language model.
        llm_name: The name of the language model, as a string.
        user_id: The user id, as a string.
        chat_id: The chat id, as a string.
Returns:
An agent executor.
"""
# Initialize memory
memory = ConversationReActBufferMemory(memory_key="chat_history",
return_messages=True, max_token_limit=10000)
class RunWebot:
def __init__(self, webot: WebotExecutor, llm: BaseLanguageModel, user_id: str,
chat_id: str):
self.llm = llm
self.webot = webot
self.user_id = user_id
self.chat_id = chat_id
def run(self, term: str) -> Union[str, Dict, DataModel]:
try:
user_id = self.user_id
chat_id = self.chat_id
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
raw_observation = self.webot.run(user_intent=term, llm=self.llm)
instruction, start_url = raw_observation["instruction"], \
raw_observation["start_url"]
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = instruction
# webot.plan = get_plan(instruction, start_url)
webot.plan = ""
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
webot_status = {
"webot_status": "running",
"url": start_url
}
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status=webot_status)
while True:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
if webot.finish or webot.interrupt or webot.error or webot.fail:
break
else:
sleep(0.5)
save_webot_status_to_redis(user_id=user_id, chat_id=chat_id,
webot_status={"webot_status": "idle",
"url": None})
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
webot.instruction = None
save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot)
if webot.finish:
webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id)
action_history = webot.action_history
last_page = webot.pages_viewed[-1]
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": json.dumps({"action_history": action_history,
"last_page": last_page}, indent=4),
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.fail:
observation = JsonDataModel.from_raw_data(
{
"success": True,
"result": "The webot failed to execute the instruction.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.interrupt:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "The web browsing is interrupted by user.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
if webot.error:
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": "Error occurs during web browsing.",
"intermediate_steps": json.dumps(
{"instruction": instruction, "start_url": start_url},
indent=4)
}
)
return observation
except Exception as e:
print(traceback.format_exc())
observation = JsonDataModel.from_raw_data(
{
"success": False,
"result": f"Failed in web browsing with the input: {term}, please try again later.",
"intermediate_steps": json.dumps({"error": str(e)})
}
)
return observation
webot = WebotExecutor.from_webot()
llm = copy.deepcopy(llm)
run_webot = RunWebot(webot, llm, chat_id=chat_id, user_id=user_id)
tools = [Tool(name=webot.name, func=run_webot.run, description=webot.description)]
continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None
interaction_executor = initialize_webot_agent(
tools, llm, continue_model, memory=memory, verbose=True
)
return interaction_executor
@app.route("/api/chat_xlang_webot", methods=["POST"])
def chat_xlang_webot() -> Dict:
"""Returns the chat response of web agent."""
try:
# Get request parameters
request_json = request.get_json()
user_id = request_json.pop("user_id", DEFAULT_USER_ID)
chat_id = request_json["chat_id"]
user_intent = request_json["user_intent"]
parent_message_id = request_json["parent_message_id"]
llm_name = request_json["llm_name"]
temperature = request_json.get("temperature", 0.4)
stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"]
kwargs = {
"temperature": temperature,
"stop": stop_words,
}
# Get language model
llm = get_llm(llm_name, **kwargs)
logger.bind(user_id=user_id, chat_id=chat_id, api="/chat",
msg_head="Request json").debug(request_json)
human_message_id = message_id_register.add_variable(user_intent)
ai_message_id = message_id_register.add_variable("")
stream_handler = AgentStreamingStdOutCallbackHandler()
# Build executor and run chat
# reset webot and status
reset_webot(user_id=user_id, chat_id=chat_id)
reset_webot_status(user_id=user_id, chat_id=chat_id)
interaction_executor = create_webot_interaction_executor(
llm=llm,
llm_name=llm_name,
chat_id=chat_id,
user_id=user_id
)
activated_message_list = message_pool.get_activated_message_list(user_id,
chat_id,
list(),
parent_message_id)
message_pool.load_agent_memory_from_list(interaction_executor.memory,
activated_message_list)
return stream_with_context(
Response(
single_round_chat_with_agent_streaming(
interaction_executor=interaction_executor,
user_intent=user_intent,
human_message_id=human_message_id,
ai_message_id=ai_message_id,
user_id=user_id,
chat_id=chat_id,
message_list=activated_message_list,
parent_message_id=parent_message_id,
stream_handler=stream_handler,
llm_name=llm_name,
app_type="webot",
),
content_type="application/json",
)
)
except Exception as e:
import traceback
traceback.print_exc()
return Response(response=None,
status=f"{OVERLOAD} backend is currently overloaded")
| [
"langchain.schema.HumanMessage",
"langchain.schema.SystemMessage"
] | [((11305, 11357), 'backend.main.app.route', 'app.route', (['"""/api/chat_xlang_webot"""'], {'methods': "['POST']"}), "('/api/chat_xlang_webot', methods=['POST'])\n", (11314, 11357), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((2664, 2689), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (2683, 2689), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((5769, 5875), 'real_agents.adapters.memory.ConversationReActBufferMemory', 'ConversationReActBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(10000)'}), "(memory_key='chat_history', return_messages=\n True, max_token_limit=10000)\n", (5798, 5875), False, 'from real_agents.adapters.memory import ConversationReActBufferMemory\n'), ((10859, 10885), 'real_agents.web_agent.WebotExecutor.from_webot', 'WebotExecutor.from_webot', ([], {}), '()\n', (10883, 10885), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((10896, 10914), 'copy.deepcopy', 'copy.deepcopy', (['llm'], {}), '(llm)\n', (10909, 10914), False, 'import copy\n'), ((11176, 11255), 'real_agents.adapters.interactive_executor.initialize_webot_agent', 'initialize_webot_agent', (['tools', 'llm', 'continue_model'], {'memory': 'memory', 'verbose': '(True)'}), '(tools, llm, continue_model, memory=memory, verbose=True)\n', (11198, 11255), False, 'from real_agents.adapters.interactive_executor import initialize_webot_agent\n'), ((1125, 1150), 'os.getenv', 'os.getenv', (['"""REDIS_SERVER"""'], {}), "('REDIS_SERVER')\n", (1134, 1150), False, 'import os\n'), ((1810, 1828), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (1822, 1828), False, 'import pickle\n'), ((1931, 1956), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (1950, 1956), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((2152, 2171), 'pickle.dumps', 'pickle.dumps', (['webot'], {}), '(webot)\n', (2164, 2171), False, 'import pickle\n'), ((2364, 2393), 'json.loads', 'json.loads', (['webot_status_json'], {}), '(webot_status_json)\n', (2374, 2393), False, 'import json\n'), ((2579, 2603), 'json.dumps', 'json.dumps', (['webot_status'], {}), '(webot_status)\n', (2589, 2603), False, 'import json\n'), ((5172, 5209), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (5185, 5209), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((5227, 5262), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_message'}), '(content=human_message)\n', (5239, 5262), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((10999, 11071), 'real_agents.adapters.agent_helpers.Tool', 'Tool', ([], {'name': 'webot.name', 'func': 'run_webot.run', 'description': 'webot.description'}), '(name=webot.name, func=run_webot.run, description=webot.description)\n', (11003, 11071), False, 'from real_agents.adapters.agent_helpers import AgentExecutor, Tool\n'), ((11505, 11523), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (11521, 11523), False, 'from flask import Response, request, stream_with_context\n'), ((12048, 12075), 'backend.api.language_model.get_llm', 'get_llm', (['llm_name'], {}), '(llm_name, **kwargs)\n', (12055, 12075), False, 'from backend.api.language_model import get_llm\n'), ((12237, 12282), 
'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['user_intent'], {}), '(user_intent)\n', (12269, 12282), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12307, 12343), 'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['""""""'], {}), "('')\n", (12339, 12343), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12370, 12407), 'real_agents.adapters.callbacks.agent_streaming.AgentStreamingStdOutCallbackHandler', 'AgentStreamingStdOutCallbackHandler', ([], {}), '()\n', (12405, 12407), False, 'from real_agents.adapters.callbacks.agent_streaming import AgentStreamingStdOutCallbackHandler\n'), ((13127, 13224), 'backend.main.message_pool.load_agent_memory_from_list', 'message_pool.load_agent_memory_from_list', (['interaction_executor.memory', 'activated_message_list'], {}), '(interaction_executor.memory,\n activated_message_list)\n', (13167, 13224), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((14066, 14087), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14085, 14087), False, 'import traceback\n'), ((14103, 14180), 'flask.Response', 'Response', ([], {'response': 'None', 'status': 'f"""{OVERLOAD} backend is currently overloaded"""'}), "(response=None, status=f'{OVERLOAD} backend is currently overloaded')\n", (14111, 14180), False, 'from flask import Response, request, stream_with_context\n'), ((12085, 12173), 'backend.main.logger.bind', 'logger.bind', ([], {'user_id': 'user_id', 'chat_id': 'chat_id', 'api': '"""/chat"""', 'msg_head': '"""Request json"""'}), "(user_id=user_id, chat_id=chat_id, api='/chat', msg_head=\n 'Request json')\n", (12096, 12173), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((13344, 13714), 'backend.utils.streaming.single_round_chat_with_agent_streaming', 'single_round_chat_with_agent_streaming', ([], {'interaction_executor': 'interaction_executor', 'user_intent': 'user_intent', 'human_message_id': 'human_message_id', 'ai_message_id': 'ai_message_id', 'user_id': 'user_id', 'chat_id': 'chat_id', 'message_list': 'activated_message_list', 'parent_message_id': 'parent_message_id', 'stream_handler': 'stream_handler', 'llm_name': 'llm_name', 'app_type': '"""webot"""'}), "(interaction_executor=\n interaction_executor, user_intent=user_intent, human_message_id=\n human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id\n =chat_id, message_list=activated_message_list, parent_message_id=\n parent_message_id, stream_handler=stream_handler, llm_name=llm_name,\n app_type='webot')\n", (13382, 13714), False, 'from backend.utils.streaming import single_round_chat_with_agent_streaming\n'), ((7547, 7557), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (7552, 7557), False, 'from time import sleep\n'), ((10439, 10461), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10459, 10461), False, 'import traceback\n'), ((8392, 8477), 'json.dumps', 'json.dumps', (["{'action_history': action_history, 'last_page': last_page}"], {'indent': '(4)'}), "({'action_history': action_history, 'last_page': last_page}, indent=4\n )\n", (8402, 8477), False, 'import json\n'), ((8574, 8648), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (8584, 8648), False, 'import json\n'), ((9103, 9177), 'json.dumps', 
'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9113, 9177), False, 'import json\n'), ((9634, 9708), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9644, 9708), False, 'import json\n'), ((10154, 10228), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (10164, 10228), False, 'import json\n')] |
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
llm_creative = ChatOpenAI(temperature=1, model_name="gpt-3.5-turbo")
def get_summary_chain() -> LLMChain:
summary_template = """
given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create:
1. a short summary
2. two interesting facts about them
\n{format_instructions}
"""
summary_prompt_template = PromptTemplate(
input_variables=["information", "twitter_posts"],
template=summary_template,
partial_variables={
"format_instructions": summary_parser.get_format_instructions()
},
)
return LLMChain(llm=llm, prompt=summary_prompt_template)
def get_interests_chain() -> LLMChain:
interesting_facts_template = """
given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create:
3 topics that might interest them
\n{format_instructions}
"""
interesting_facts_prompt_template = PromptTemplate(
input_variables=["information", "twitter_posts"],
template=interesting_facts_template,
partial_variables={
"format_instructions": topics_of_interest_parser.get_format_instructions()
},
)
return LLMChain(llm=llm, prompt=interesting_facts_prompt_template)
def get_ice_breaker_chain() -> LLMChain:
ice_breaker_template = """
given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create:
2 creative Ice breakers with them that are derived from their activity on Linkedin and twitter, preferably on latest tweets
\n{format_instructions}
"""
ice_breaker_prompt_template = PromptTemplate(
input_variables=["information", "twitter_posts"],
template=ice_breaker_template,
partial_variables={
"format_instructions": ice_breaker_parser.get_format_instructions()
},
)
return LLMChain(llm=llm_creative, prompt=ice_breaker_prompt_template)
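# Illustrative usage sketch (not part of the original file); the input values are placeholders:
#   chain = get_summary_chain()
#   summary = chain.run(information="<linkedin profile data>", twitter_posts="<recent tweets>")
# Each chain expects the `information` and `twitter_posts` variables declared in its prompt template.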
| [
"langchain.chains.LLMChain",
"langchain_openai.ChatOpenAI"
] | [((225, 278), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (235, 278), False, 'from langchain_openai import ChatOpenAI\n'), ((294, 347), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(1)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=1, model_name='gpt-3.5-turbo')\n", (304, 347), False, 'from langchain_openai import ChatOpenAI\n'), ((933, 982), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'summary_prompt_template'}), '(llm=llm, prompt=summary_prompt_template)\n', (941, 982), False, 'from langchain.chains import LLMChain\n'), ((1580, 1639), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'interesting_facts_prompt_template'}), '(llm=llm, prompt=interesting_facts_prompt_template)\n', (1588, 1639), False, 'from langchain.chains import LLMChain\n'), ((2304, 2366), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm_creative', 'prompt': 'ice_breaker_prompt_template'}), '(llm=llm_creative, prompt=ice_breaker_prompt_template)\n', (2312, 2366), False, 'from langchain.chains import LLMChain\n'), ((863, 903), 'output_parsers.summary_parser.get_format_instructions', 'summary_parser.get_format_instructions', ([], {}), '()\n', (901, 903), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((1499, 1550), 'output_parsers.topics_of_interest_parser.get_format_instructions', 'topics_of_interest_parser.get_format_instructions', ([], {}), '()\n', (1548, 1550), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((2230, 2274), 'output_parsers.ice_breaker_parser.get_format_instructions', 'ice_breaker_parser.get_format_instructions', ([], {}), '()\n', (2272, 2274), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n')] |
import asyncio
import uvicorn
from typing import AsyncIterable, Awaitable
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import FileResponse, StreamingResponse
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
load_dotenv()
async def wait_done(fn: Awaitable, event: asyncio.Event):
try:
await fn
except Exception as e:
print(e)
event.set()
finally:
event.set()
async def call_openai(question: str) -> AsyncIterable[str]:
callback = AsyncIteratorCallbackHandler()
model = ChatOpenAI(streaming=True, verbose=True, callbacks=[callback])
coroutine = wait_done(model.agenerate(messages=[[HumanMessage(content=question)]]), callback.done)
task = asyncio.create_task(coroutine)
async for token in callback.aiter():
yield f"{token}"
await task
app = FastAPI()
@app.post("/ask")
def ask(body: dict):
return StreamingResponse(call_openai(body['question']), media_type="text/event-stream")
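# Example request (illustrative, not part of the original file); the endpoint streams tokens back as
# they are generated by the model:
#   curl -N -X POST http://localhost:8888/ask \
#        -H "Content-Type: application/json" \
#        -d '{"question": "Tell me a joke"}'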
@app.get("/")
async def homepage():
return FileResponse('statics/index.html')
if __name__ == "__main__":
uvicorn.run(host="0.0.0.0", port=8888, app=app) | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((345, 358), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import load_dotenv\n'), ((959, 968), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (966, 968), False, 'from fastapi import FastAPI\n'), ((616, 646), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (644, 646), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((659, 721), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'verbose': '(True)', 'callbacks': '[callback]'}), '(streaming=True, verbose=True, callbacks=[callback])\n', (669, 721), False, 'from langchain.chat_models import ChatOpenAI\n'), ((837, 867), 'asyncio.create_task', 'asyncio.create_task', (['coroutine'], {}), '(coroutine)\n', (856, 867), False, 'import asyncio\n'), ((1149, 1183), 'fastapi.responses.FileResponse', 'FileResponse', (['"""statics/index.html"""'], {}), "('statics/index.html')\n", (1161, 1183), False, 'from fastapi.responses import FileResponse, StreamingResponse\n'), ((1216, 1263), 'uvicorn.run', 'uvicorn.run', ([], {'host': '"""0.0.0.0"""', 'port': '(8888)', 'app': 'app'}), "(host='0.0.0.0', port=8888, app=app)\n", (1227, 1263), False, 'import uvicorn\n'), ((776, 806), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'question'}), '(content=question)\n', (788, 806), False, 'from langchain.schema import HumanMessage\n')] |
""" Adapted from https://github.com/QwenLM/Qwen-7B/blob/main/examples/react_demo.py """
import json
import os
from langchain.llms import OpenAI
llm = OpenAI(
model_name="qwen",
temperature=0,
openai_api_base="http://192.168.0.53:7891/v1",
openai_api_key="xxx",
)
# Template that concatenates the key information of a plugin into a single piece of text.
TOOL_DESC = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters}"""
# Instruction template for ReAct prompting; it will contain the detailed plugin information.
PROMPT_REACT = """Answer the following questions as best you can. You have access to the following tools:
{tools_text}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tools_name_text}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!
Question: {query}"""
def llm_with_plugin(prompt: str, history, list_of_plugin_info=()):
"""
    Args:
        prompt: The user's latest query.
        history: The dialogue history between the user and the model, a list
            whose elements are single rounds of dialogue of the form {"user": "user input", "bot": "model output"}.
            The most recent round is at the end of the list; the latest query is not included.
        list_of_plugin_info: The list of candidate plugins, a list whose elements each hold the key information of one plugin.
            For example, list_of_plugin_info = [plugin_info_0, plugin_info_1, plugin_info_2],
            where plugin_info_0, plugin_info_1, plugin_info_2 are the samples shown earlier in this document.
    Returns: The model's answer to the user's latest query.
"""
chat_history = [(x["user"], x["bot"]) for x in history] + [(prompt, '')]
    # The initial text that the model will be asked to continue (text completion)
planning_prompt = build_input_text(chat_history, list_of_plugin_info)
text = ""
while True:
output = text_completion(planning_prompt + text, stop_words=["Observation:", "Observation:\n"])
action, action_input, output = parse_latest_plugin_call(output)
        if action:  # a plugin call is needed
            # action and action_input are the identifier of the plugin to call and its input arguments
            # observation is the result returned by the plugin, as a string
observation = call_plugin(action, action_input)
output += f"\nObservation: {observation}\nThought:"
text += output
        else:  # generation has finished and no further plugin calls are needed
text += output
break
new_history = []
new_history.extend(history)
new_history.append({"user": prompt, "bot": text})
return text, new_history
def build_input_text(chat_history, list_of_plugin_info) -> str:
""" 将对话历史、插件信息聚合成一段初始文本 """
tools_text = []
for plugin_info in list_of_plugin_info:
tool = TOOL_DESC.format(
name_for_model=plugin_info["name_for_model"],
name_for_human=plugin_info["name_for_human"],
description_for_model=plugin_info["description_for_model"],
parameters=json.dumps(plugin_info["parameters"], ensure_ascii=False),
)
if plugin_info.get("args_format", "json") == "json":
tool += " Format the arguments as a JSON object."
elif plugin_info['args_format'] == 'code':
tool += " Enclose the code within triple backticks (`) at the beginning and end of the code."
else:
raise NotImplementedError
tools_text.append(tool)
tools_text = '\n\n'.join(tools_text)
    # The identifiers of the candidate plugins
tools_name_text = ", ".join([plugin_info["name_for_model"] for plugin_info in list_of_plugin_info])
im_start = "<|im_start|>"
im_end = "<|im_end|>"
prompt = f"{im_start}system\nYou are a helpful assistant.{im_end}"
for i, (query, response) in enumerate(chat_history):
        if list_of_plugin_info:  # if there are candidate plugins
            # Fill the detailed plugin information into the last or second-to-last round of dialogue;
            # exactly where to put it is up to you.
if (len(chat_history) == 1) or (i == len(chat_history) - 2):
query = PROMPT_REACT.format(
tools_text=tools_text,
tools_name_text=tools_name_text,
query=query,
)
        query = query.lstrip("\n").rstrip()  # Important! Without strip, the data would differ from how the training data was constructed.
        response = response.lstrip("\n").rstrip()  # Important! Without strip, the data would differ from how the training data was constructed.
        # When using text-completion mode, the user and the AI need to be distinguished with the following format:
prompt += f"\n{im_start}user\n{query}{im_end}"
prompt += f"\n{im_start}assistant\n{response}{im_end}"
assert prompt.endswith(f"\n{im_start}assistant\n{im_end}")
prompt = prompt[: -len(f"{im_end}")]
return prompt
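# Illustrative sketch (not part of the original file) of the text produced by build_input_text for a
# single user turn, with the ReAct plugin instructions substituted into the query:
#   <|im_start|>system
#   You are a helpful assistant.<|im_end|>
#   <|im_start|>user
#   Answer the following questions as best you can. ... Question: 你好<|im_end|>
#   <|im_start|>assistant
# The final "<|im_end|>" after "assistant" is stripped so the model can continue from there.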
def text_completion(input_text: str, stop_words) -> str:  # used as a text-completion (continuation) model
im_end = "<|im_end|>"
if im_end not in stop_words:
stop_words = stop_words + [im_end]
    return llm(input_text, stop=stop_words)  # the continuation of input_text, not including input_text itself
def parse_latest_plugin_call(text):
plugin_name, plugin_args = "", ""
i = text.rfind("\nAction:")
j = text.rfind("\nAction Input:")
k = text.rfind("\nObservation:")
if 0 <= i < j: # If the text has `Action` and `Action input`,
if k < j: # but does not contain `Observation`,
            # then it is likely that `Observation` is omitted by the LLM,
# because the output text may have discarded the stop word.
text = text.rstrip() + "\nObservation:" # Add it back.
k = text.rfind("\nObservation:")
plugin_name = text[i + len("\nAction:"): j].strip()
plugin_args = text[j + len("\nAction Input:"): k].strip()
text = text[:k]
return plugin_name, plugin_args, text
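# Illustrative sketch (not part of the original file): for model output such as
#   "Thought: I should search for it.\nAction: google_search\nAction Input: {\"search_query\": \"Qwen\"}"
# parse_latest_plugin_call returns ("google_search", '{"search_query": "Qwen"}', <text truncated before "\nObservation:">).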
def call_plugin(plugin_name: str, plugin_args: str) -> str:
""" 请开发者自行完善这部分内容。这里的参考实现仅是 demo 用途,非生产用途 """
if plugin_name == "google_search":
        # To use SerpAPI, you need to fill in your SERPAPI_API_KEY here!
os.environ["SERPAPI_API_KEY"] = os.getenv("SERPAPI_API_KEY", default="")
from langchain import SerpAPIWrapper
return SerpAPIWrapper().run(json.loads(plugin_args)["search_query"])
elif plugin_name == "image_gen":
import urllib.parse
prompt = json.loads(plugin_args)["prompt"]
prompt = urllib.parse.quote(prompt)
return json.dumps({"image_url": f"https://image.pollinations.ai/prompt/{prompt}"}, ensure_ascii=False)
else:
raise NotImplementedError
def test():
tools = [
{
"name_for_human": "谷歌搜索",
"name_for_model": "google_search",
"description_for_model": "谷歌搜索是一个通用搜索引擎,可用于访问互联网、查询百科知识、了解时事新闻等。",
"parameters": [
{
"name": "search_query",
"description": "搜索关键词或短语",
"required": True,
"schema": {"type": "string"},
}
],
},
{
"name_for_human": "文生图",
"name_for_model": "image_gen",
"description_for_model": "文生图是一个AI绘画(图像生成)服务,输入文本描述,返回根据文本作画得到的图片的URL",
"parameters": [
{
"name": "prompt",
"description": "英文关键词,描述了希望图像具有什么内容",
"required": True,
"schema": {"type": "string"},
}
],
},
]
history = []
for query in ["你好", "谁是周杰伦", "他老婆是谁", "给我画个可爱的小猫吧,最好是黑猫"]:
print(f"User's Query:\n{query}\n")
response, history = llm_with_plugin(prompt=query, history=history, list_of_plugin_info=tools)
print(f"Qwen's Response:\n{response}\n")
if __name__ == "__main__":
test()
"""如果执行成功,在终端下应当能看到如下输出:
User's Query:
你好
Qwen's Response:
Thought: 提供的工具对回答该问题帮助较小,我将不使用工具直接作答。
Final Answer: 你好!很高兴见到你。有什么我可以帮忙的吗?
User's Query:
谁是周杰伦
Qwen's Response:
Thought: 我应该使用Google搜索查找相关信息。
Action: google_search
Action Input: {"search_query": "周杰伦"}
Observation: Jay Chou is a Taiwanese singer, songwriter, record producer, rapper, actor, television personality, and businessman.
Thought: I now know the final answer.
Final Answer: 周杰伦(Jay Chou)是一位来自台湾的歌手、词曲创作人、音乐制作人、说唱歌手、演员、电视节目主持人和企业家。他以其独特的音乐风格和才华在华语乐坛享有很高的声誉。
User's Query:
他老婆是谁
Qwen's Response:
Thought: 我应该使用Google搜索查找相关信息。
Action: google_search
Action Input: {"search_query": "周杰伦 老婆"}
Observation: Hannah Quinlivan
Thought: I now know the final answer.
Final Answer: 周杰伦的老婆是Hannah Quinlivan,她是一位澳大利亚籍的模特和演员。两人于2015年结婚,并育有一子。
User's Query:
给我画个可爱的小猫吧,最好是黑猫
Qwen's Response:
Thought: 我应该使用文生图API来生成一张可爱的小猫图片。
Action: image_gen
Action Input: {"prompt": "cute black cat"}
Observation: {"image_url": "https://image.pollinations.ai/prompt/cute%20black%20cat"}
Thought: I now know the final answer.
Final Answer: 生成的可爱小猫图片的URL为https://image.pollinations.ai/prompt/cute%20black%20cat。你可以点击这个链接查看图片。
"""
| [
"langchain.llms.OpenAI",
"langchain.SerpAPIWrapper"
] | [((153, 267), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""qwen"""', 'temperature': '(0)', 'openai_api_base': '"""http://192.168.0.53:7891/v1"""', 'openai_api_key': '"""xxx"""'}), "(model_name='qwen', temperature=0, openai_api_base=\n 'http://192.168.0.53:7891/v1', openai_api_key='xxx')\n", (159, 267), False, 'from langchain.llms import OpenAI\n'), ((5778, 5818), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {'default': '""""""'}), "('SERPAPI_API_KEY', default='')\n", (5787, 5818), False, 'import os\n'), ((6118, 6217), 'json.dumps', 'json.dumps', (["{'image_url': f'https://image.pollinations.ai/prompt/{prompt}'}"], {'ensure_ascii': '(False)'}), "({'image_url': f'https://image.pollinations.ai/prompt/{prompt}'},\n ensure_ascii=False)\n", (6128, 6217), False, 'import json\n'), ((2933, 2990), 'json.dumps', 'json.dumps', (["plugin_info['parameters']"], {'ensure_ascii': '(False)'}), "(plugin_info['parameters'], ensure_ascii=False)\n", (2943, 2990), False, 'import json\n'), ((5880, 5896), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (5894, 5896), False, 'from langchain import SerpAPIWrapper\n'), ((5901, 5924), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (5911, 5924), False, 'import json\n'), ((6025, 6048), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (6035, 6048), False, 'import json\n')] |
"""Wrapper around Cohere APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]:
import cohere
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(cohere.error.CohereError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
class Cohere(LLM):
"""Wrapper around Cohere large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Cohere
cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = None
"""Model name to use."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
k: int = 0
"""Number of most likely tokens to consider at each step."""
p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None
"""Specify how the client handles inputs longer than the maximum token
length: Truncate from START, END or NONE"""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
cohere_api_key: Optional[str] = None
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"truncate": self.truncate,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cohere"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
response = completion_with_retry(
self, model=self.model, prompt=prompt, **params
)
text = response.generations[0].text
# If stop tokens are provided, Cohere's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
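# A minimal usage sketch for the Cohere wrapper above, assuming the ``cohere``
# package is installed and that "my-api-key" is replaced with a real key; the
# model name mirrors the class docstring and the prompt is a placeholder.
from langchain.llms import Cohere

cohere_llm = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
# A per-call `stop` list is forwarded to Cohere as `stop_sequences` and then
# stripped from the returned text by enforce_stop_tokens.
print(cohere_llm("Tell me a joke.", stop=["\n"]))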
"""Wrapper around Cohere APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]:
import cohere
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(cohere.error.CohereError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
class Cohere(LLM):
"""Wrapper around Cohere large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Cohere
cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = None
"""Model name to use."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
k: int = 0
"""Number of most likely tokens to consider at each step."""
p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None
"""Specify how the client handles inputs longer than the maximum token
length: Truncate from START, END or NONE"""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
cohere_api_key: Optional[str] = None
stop: Optional[List[str]] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"truncate": self.truncate,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cohere"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
response = completion_with_retry(
self, model=self.model, prompt=prompt, **params
)
text = response.generations[0].text
# If stop tokens are provided, Cohere's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')] |
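# A minimal usage sketch for the GooseAI wrapper above, assuming the ``openai``
# package is installed and GOOSEAI_API_KEY is set in the environment; the model
# name comes from the class docstring and the prompt is a placeholder. Any
# keyword argument that is not a declared field would be routed into
# `model_kwargs` by the `build_extra` validator (with a warning).
from langchain.llms import GooseAI

gooseai_llm = GooseAI(model_name="gpt-neo-20b", temperature=0.7, max_tokens=64)
print(gooseai_llm("Tell me a joke."))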
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')] |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')] |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""Wrapper around OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transfered to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')] |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class Anyscale(LLM):
"""Wrapper around Anyscale Services.
To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``,
``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
Service, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
anyscale_service_route="SERVICE_ROUTE",
anyscale_service_token="SERVICE_TOKEN")
# Use Ray for distributed processing
import ray
prompt_list=[]
@ray.remote
def send_query(llm, prompt):
resp = llm(prompt)
return resp
futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)
"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model. Reserved for future use"""
anyscale_service_url: Optional[str] = None
anyscale_service_route: Optional[str] = None
anyscale_service_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anyscale_service_url = get_from_dict_or_env(
values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
)
anyscale_service_route = get_from_dict_or_env(
values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
)
anyscale_service_token = get_from_dict_or_env(
values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
)
try:
anyscale_service_endpoint = f"{anyscale_service_url}/-/route"
headers = {"Authorization": f"Bearer {anyscale_service_token}"}
requests.get(anyscale_service_endpoint, headers=headers)
except requests.exceptions.RequestException as e:
raise ValueError(e)
values["anyscale_service_url"] = anyscale_service_url
values["anyscale_service_route"] = anyscale_service_route
values["anyscale_service_token"] = anyscale_service_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"anyscale_service_url": self.anyscale_service_url,
"anyscale_service_route": self.anyscale_service_route,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anyscale"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Anyscale Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = anyscale("Tell me a joke.")
"""
anyscale_service_endpoint = (
f"{self.anyscale_service_url}/{self.anyscale_service_route}"
)
headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
body = {"prompt": prompt}
resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)
if resp.status_code != 200:
raise ValueError(
f"Error returned by service, status code {resp.status_code}"
)
text = resp.text
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
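# A minimal usage sketch for the Anyscale wrapper above; the three service
# values are placeholders (they can equally be supplied through the
# ANYSCALE_SERVICE_URL, ANYSCALE_SERVICE_ROUTE and ANYSCALE_SERVICE_TOKEN
# environment variables).
from langchain.llms import Anyscale

anyscale_llm = Anyscale(
    anyscale_service_url="SERVICE_URL",
    anyscale_service_route="SERVICE_ROUTE",
    anyscale_service_token="SERVICE_TOKEN",
)
# _call POSTs {"prompt": ...} to f"{url}/{route}" and returns the response text,
# optionally trimmed by enforce_stop_tokens when `stop` is given.
print(anyscale_llm("Tell me a joke."))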
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class Anyscale(LLM):
"""Wrapper around Anyscale Services.
To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``,
``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
Service, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
anyscale_service_route="SERVICE_ROUTE",
anyscale_service_token="SERVICE_TOKEN")
# Use Ray for distributed processing
import ray
prompt_list=[]
@ray.remote
def send_query(llm, prompt):
resp = llm(prompt)
return resp
futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)
"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model. Reserved for future use"""
anyscale_service_url: Optional[str] = None
anyscale_service_route: Optional[str] = None
anyscale_service_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anyscale_service_url = get_from_dict_or_env(
values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
)
anyscale_service_route = get_from_dict_or_env(
values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
)
anyscale_service_token = get_from_dict_or_env(
values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
)
try:
anyscale_service_endpoint = f"{anyscale_service_url}/-/route"
headers = {"Authorization": f"Bearer {anyscale_service_token}"}
requests.get(anyscale_service_endpoint, headers=headers)
except requests.exceptions.RequestException as e:
raise ValueError(e)
values["anyscale_service_url"] = anyscale_service_url
values["anyscale_service_route"] = anyscale_service_route
values["anyscale_service_token"] = anyscale_service_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"anyscale_service_url": self.anyscale_service_url,
"anyscale_service_route": self.anyscale_service_route,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anyscale"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Anyscale Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = anyscale("Tell me a joke.")
"""
anyscale_service_endpoint = (
f"{self.anyscale_service_url}/{self.anyscale_service_route}"
)
headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
body = {"prompt": prompt}
resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)
if resp.status_code != 200:
raise ValueError(
f"Error returned by service, status code {resp.status_code}"
)
text = resp.text
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class Anyscale(LLM):
"""Wrapper around Anyscale Services.
To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``,
``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
Service, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
anyscale_service_route="SERVICE_ROUTE",
anyscale_service_token="SERVICE_TOKEN")
# Use Ray for distributed processing
import ray
prompt_list=[]
@ray.remote
def send_query(llm, prompt):
resp = llm(prompt)
return resp
futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)
"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model. Reserved for future use"""
anyscale_service_url: Optional[str] = None
anyscale_service_route: Optional[str] = None
anyscale_service_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anyscale_service_url = get_from_dict_or_env(
values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
)
anyscale_service_route = get_from_dict_or_env(
values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
)
anyscale_service_token = get_from_dict_or_env(
values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
)
try:
anyscale_service_endpoint = f"{anyscale_service_url}/-/route"
headers = {"Authorization": f"Bearer {anyscale_service_token}"}
requests.get(anyscale_service_endpoint, headers=headers)
except requests.exceptions.RequestException as e:
raise ValueError(e)
values["anyscale_service_url"] = anyscale_service_url
values["anyscale_service_route"] = anyscale_service_route
values["anyscale_service_token"] = anyscale_service_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"anyscale_service_url": self.anyscale_service_url,
"anyscale_service_route": self.anyscale_service_route,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anyscale"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Anyscale Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = anyscale("Tell me a joke.")
"""
anyscale_service_endpoint = (
f"{self.anyscale_service_url}/{self.anyscale_service_route}"
)
headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
body = {"prompt": prompt}
resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)
if resp.status_code != 200:
raise ValueError(
f"Error returned by service, status code {resp.status_code}"
)
text = resp.text
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
class Anyscale(LLM):
"""Wrapper around Anyscale Services.
To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``,
``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale
Service, or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscale = Anyscale(anyscale_service_url="SERVICE_URL",
anyscale_service_route="SERVICE_ROUTE",
anyscale_service_token="SERVICE_TOKEN")
# Use Ray for distributed processing
import ray
prompt_list=[]
@ray.remote
def send_query(llm, prompt):
resp = llm(prompt)
return resp
futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list]
results = ray.get(futures)
"""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model. Reserved for future use"""
anyscale_service_url: Optional[str] = None
anyscale_service_route: Optional[str] = None
anyscale_service_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
anyscale_service_url = get_from_dict_or_env(
values, "anyscale_service_url", "ANYSCALE_SERVICE_URL"
)
anyscale_service_route = get_from_dict_or_env(
values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE"
)
anyscale_service_token = get_from_dict_or_env(
values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN"
)
try:
anyscale_service_endpoint = f"{anyscale_service_url}/-/route"
headers = {"Authorization": f"Bearer {anyscale_service_token}"}
requests.get(anyscale_service_endpoint, headers=headers)
except requests.exceptions.RequestException as e:
raise ValueError(e)
values["anyscale_service_url"] = anyscale_service_url
values["anyscale_service_route"] = anyscale_service_route
values["anyscale_service_token"] = anyscale_service_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"anyscale_service_url": self.anyscale_service_url,
"anyscale_service_route": self.anyscale_service_route,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "anyscale"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Anyscale Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = anyscale("Tell me a joke.")
"""
anyscale_service_endpoint = (
f"{self.anyscale_service_url}/{self.anyscale_service_route}"
)
headers = {"Authorization": f"Bearer {self.anyscale_service_token}"}
body = {"prompt": prompt}
resp = requests.post(anyscale_service_endpoint, headers=headers, json=body)
if resp.status_code != 200:
raise ValueError(
f"Error returned by service, status code {resp.status_code}"
)
text = resp.text
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')] |
from langchain.prompts import PromptTemplate
_symptom_extract_template = """Consider the following conversation patient note:
Patient note: {note}
Choose one of the symptoms to be the chief complaint (it is usually the first symptom mentioned).
Provide your response strictly in the following format, replacing only the name_of_chief_complaint (keeping : yes), and refrain from including any additional text:
<symptom> name_of_chief_complaint </symptom>
"""
_symptom_match_template = """Given the symptom: {symptom} which of the following retrievals is the best match?
Retrievals:
{retrievals}
Select only one and write it below in the following format:
<match> choice </match>
Remember, do not include any other text, ensure your choice is in the provided retrievals, and follow the output format.
"""
CC_EXTRACT_PROMPT = PromptTemplate.from_template(_symptom_extract_template)
CC_MATCH_PROMPT = PromptTemplate.from_template(_symptom_match_template)
| [
"langchain.prompts.PromptTemplate.from_template"
] | [((830, 885), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_extract_template'], {}), '(_symptom_extract_template)\n', (858, 885), False, 'from langchain.prompts import PromptTemplate\n'), ((904, 957), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_match_template'], {}), '(_symptom_match_template)\n', (932, 957), False, 'from langchain.prompts import PromptTemplate\n')] |
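# A minimal sketch of rendering the two templates defined above (assumes they
# are imported from that module); the note, symptom and retrieval strings are
# invented placeholders.
print(CC_EXTRACT_PROMPT.format(note="Patient reports chest pain and nausea since this morning."))
print(
    CC_MATCH_PROMPT.format(
        symptom="chest pain",
        retrievals="1. chest pain\n2. abdominal pain\n3. shortness of breath",
    )
)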
import requests
from typing import Any, Dict, Optional
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains import APIChain
from langchain.prompts import BasePromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from .requests_l402 import RequestsL402Wrapper
from .requests_l402 import ResponseTextWrapper
from lightning import LightningNode
class L402APIChain(APIChain):
requests_wrapper: Any
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
lightning_node = None,
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
requests_L402 = RequestsL402Wrapper(lightning_node, requests)
lang_chain_request_L402 = ResponseTextWrapper(
requests_wrapper=requests_L402,
)
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=lang_chain_request_L402,
api_docs=api_docs,
**kwargs,
)
| [
"langchain.chains.llm.LLMChain"
] | [((1139, 1179), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (1147, 1179), False, 'from langchain.chains.llm import LLMChain\n'), ((1207, 1252), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (1215, 1252), False, 'from langchain.chains.llm import LLMChain\n')] |
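# A minimal sketch of wiring up the L402-aware API chain defined above. It
# assumes that class is importable, that an OpenAI key is configured, and that
# `my_lightning_node` is a pre-configured LightningNode instance; API_DOCS and
# the question are invented placeholders.
from langchain.llms import OpenAI

API_DOCS = """BASE URL: https://example.com/api
The /data endpoint returns JSON and is protected by an L402 paywall."""

chain = L402APIChain.from_llm_and_api_docs(
    llm=OpenAI(temperature=0),
    api_docs=API_DOCS,
    lightning_node=my_lightning_node,  # hypothetical, pre-configured LightningNode
)
print(chain.run("Fetch the paywalled data and summarize it."))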
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
| [
"langchain.chains.llm.LLMChain",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.pal.base.PALChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain",
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.llm_checker.base.LLMCheckerChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
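A minimal sketch, under stated assumptions, of driving the loaders defined in the row above: "_type" selects an entry from type_to_loader_dict, and the nested "llm"/"prompt" dicts are resolved by load_llm_from_config and load_prompt_from_config. The concrete values and the file name are invented for illustration.
# Hypothetical serialized chain config (values are assumptions, not taken from the row).
config = {
    "_type": "llm_chain",
    "llm": {"_type": "openai", "temperature": 0.0},
    "prompt": {
        "_type": "prompt",
        "input_variables": ["question"],
        "template": "Answer briefly: {question}",
    },
}
chain = load_chain_from_config(config)

# The unified entry point also resolves local json/yaml files or LangChainHub paths.
chain = load_chain("my_chain.json", verbose=True)  # hypothetical file name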
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
| [
"langchain.chains.llm.LLMChain",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.pal.base.PALChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain",
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.llm_checker.base.LLMCheckerChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
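# Base URL for chain configs stored in the langchain-hub repository.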
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
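# Registry mapping the "_type" field of a chain config to its loader function.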
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
| [
"langchain.chains.llm.LLMChain",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.pal.base.PALChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain",
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.llm_checker.base.LLMCheckerChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union
import yaml
from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.chains.vector_db_qa.base import VectorDBQA
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"
def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
"""Load LLM chain from config dict."""
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return LLMChain(llm=llm, prompt=prompt, **config)
def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "embeddings" in kwargs:
embeddings = kwargs.pop("embeddings")
else:
raise ValueError("`embeddings` must be present.")
return HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=embeddings, **config
)
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
else:
raise ValueError(
"One of `document_prompt` or `document_prompt_path` must be present."
)
return StuffDocumentsChain(
llm_chain=llm_chain, document_prompt=document_prompt, **config
)
def _load_map_reduce_documents_chain(
config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
if not isinstance(llm_chain, LLMChain):
raise ValueError(f"Expected LLMChain, got {llm_chain}")
if "combine_document_chain" in config:
combine_document_chain_config = config.pop("combine_document_chain")
combine_document_chain = load_chain_from_config(combine_document_chain_config)
elif "combine_document_chain_path" in config:
combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
else:
raise ValueError(
"One of `combine_document_chain` or "
"`combine_document_chain_path` must be present."
)
if "collapse_document_chain" in config:
collapse_document_chain_config = config.pop("collapse_document_chain")
if collapse_document_chain_config is None:
collapse_document_chain = None
else:
collapse_document_chain = load_chain_from_config(
collapse_document_chain_config
)
elif "collapse_document_chain_path" in config:
collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
return MapReduceDocumentsChain(
llm_chain=llm_chain,
combine_document_chain=combine_document_chain,
collapse_document_chain=collapse_document_chain,
**config,
)
def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMBashChain(llm=llm, prompt=prompt, **config)
def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "create_draft_answer_prompt" in config:
create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt")
create_draft_answer_prompt = load_prompt_from_config(
create_draft_answer_prompt_config
)
elif "create_draft_answer_prompt_path" in config:
create_draft_answer_prompt = load_prompt(
config.pop("create_draft_answer_prompt_path")
)
if "list_assertions_prompt" in config:
list_assertions_prompt_config = config.pop("list_assertions_prompt")
list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config)
elif "list_assertions_prompt_path" in config:
list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
if "check_assertions_prompt" in config:
check_assertions_prompt_config = config.pop("check_assertions_prompt")
check_assertions_prompt = load_prompt_from_config(
check_assertions_prompt_config
)
elif "check_assertions_prompt_path" in config:
check_assertions_prompt = load_prompt(
config.pop("check_assertions_prompt_path")
)
if "revised_answer_prompt" in config:
revised_answer_prompt_config = config.pop("revised_answer_prompt")
revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
elif "revised_answer_prompt_path" in config:
revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
return LLMCheckerChain(
llm=llm,
create_draft_answer_prompt=create_draft_answer_prompt,
list_assertions_prompt=list_assertions_prompt,
check_assertions_prompt=check_assertions_prompt,
revised_answer_prompt=revised_answer_prompt,
**config,
)
def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
return LLMMathChain(llm=llm, prompt=prompt, **config)
def _load_map_rerank_documents_chain(
config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_config` must be present.")
return MapRerankDocumentsChain(llm_chain=llm_chain, **config)
def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
elif "prompt_path" in config:
prompt = load_prompt(config.pop("prompt_path"))
else:
raise ValueError("One of `prompt` or `prompt_path` must be present.")
return PALChain(llm=llm, prompt=prompt, **config)
def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
if "initial_llm_chain" in config:
initial_llm_chain_config = config.pop("initial_llm_chain")
initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
elif "initial_llm_chain_path" in config:
initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
else:
raise ValueError(
"One of `initial_llm_chain` or `initial_llm_chain_config` must be present."
)
if "refine_llm_chain" in config:
refine_llm_chain_config = config.pop("refine_llm_chain")
refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
elif "refine_llm_chain_path" in config:
refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
else:
raise ValueError(
"One of `refine_llm_chain` or `refine_llm_chain_config` must be present."
)
if "document_prompt" in config:
prompt_config = config.pop("document_prompt")
document_prompt = load_prompt_from_config(prompt_config)
elif "document_prompt_path" in config:
document_prompt = load_prompt(config.pop("document_prompt_path"))
return RefineDocumentsChain(
initial_llm_chain=initial_llm_chain,
refine_llm_chain=refine_llm_chain,
document_prompt=document_prompt,
**config,
)
def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config)
def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain:
if "database" in kwargs:
database = kwargs.pop("database")
else:
raise ValueError("`database` must be present.")
if "llm" in config:
llm_config = config.pop("llm")
llm = load_llm_from_config(llm_config)
elif "llm_path" in config:
llm = load_llm(config.pop("llm_path"))
else:
raise ValueError("One of `llm` or `llm_path` must be present.")
if "prompt" in config:
prompt_config = config.pop("prompt")
prompt = load_prompt_from_config(prompt_config)
return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config)
def _load_vector_db_qa_with_sources_chain(
config: dict, **kwargs: Any
) -> VectorDBQAWithSourcesChain:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQAWithSourcesChain(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA:
if "vectorstore" in kwargs:
vectorstore = kwargs.pop("vectorstore")
else:
raise ValueError("`vectorstore` must be present.")
if "combine_documents_chain" in config:
combine_documents_chain_config = config.pop("combine_documents_chain")
combine_documents_chain = load_chain_from_config(combine_documents_chain_config)
elif "combine_documents_chain_path" in config:
combine_documents_chain = load_chain(config.pop("combine_documents_chain_path"))
else:
raise ValueError(
"One of `combine_documents_chain` or "
"`combine_documents_chain_path` must be present."
)
return VectorDBQA(
combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore,
**config,
)
def _load_api_chain(config: dict, **kwargs: Any) -> APIChain:
if "api_request_chain" in config:
api_request_chain_config = config.pop("api_request_chain")
api_request_chain = load_chain_from_config(api_request_chain_config)
elif "api_request_chain_path" in config:
api_request_chain = load_chain(config.pop("api_request_chain_path"))
else:
raise ValueError(
"One of `api_request_chain` or `api_request_chain_path` must be present."
)
if "api_answer_chain" in config:
api_answer_chain_config = config.pop("api_answer_chain")
api_answer_chain = load_chain_from_config(api_answer_chain_config)
elif "api_answer_chain_path" in config:
api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
else:
raise ValueError(
"One of `api_answer_chain` or `api_answer_chain_path` must be present."
)
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
else:
raise ValueError("`requests_wrapper` must be present.")
return APIChain(
api_request_chain=api_request_chain,
api_answer_chain=api_answer_chain,
requests_wrapper=requests_wrapper,
**config,
)
def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
if "llm_chain" in config:
llm_chain_config = config.pop("llm_chain")
llm_chain = load_chain_from_config(llm_chain_config)
elif "llm_chain_path" in config:
llm_chain = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
if "requests_wrapper" in kwargs:
requests_wrapper = kwargs.pop("requests_wrapper")
return LLMRequestsChain(
llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config
)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
type_to_loader_dict = {
"api_chain": _load_api_chain,
"hyde_chain": _load_hyde_chain,
"llm_chain": _load_llm_chain,
"llm_bash_chain": _load_llm_bash_chain,
"llm_checker_chain": _load_llm_checker_chain,
"llm_math_chain": _load_llm_math_chain,
"llm_requests_chain": _load_llm_requests_chain,
"pal_chain": _load_pal_chain,
"qa_with_sources_chain": _load_qa_with_sources_chain,
"stuff_documents_chain": _load_stuff_documents_chain,
"map_reduce_documents_chain": _load_map_reduce_documents_chain,
"map_rerank_documents_chain": _load_map_rerank_documents_chain,
"refine_documents_chain": _load_refine_documents_chain,
"sql_database_chain": _load_sql_database_chain,
"vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain,
"vector_db_qa": _load_vector_db_qa,
}
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
raise ValueError("Must specify a chain Type in config")
config_type = config.pop("_type")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} chain not supported")
chain_loader = type_to_loader_dict[config_type]
return chain_loader(config, **kwargs)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
"""Load chain from file."""
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError("File type must be json or yaml")
# Override default 'verbose' and 'memory' for the chain
if "verbose" in kwargs:
config["verbose"] = kwargs.pop("verbose")
if "memory" in kwargs:
config["memory"] = kwargs.pop("memory")
# Load the chain from the config now.
return load_chain_from_config(config, **kwargs)
| [
"langchain.chains.llm.LLMChain",
"langchain.chains.qa_with_sources.base.QAWithSourcesChain",
"langchain.chains.api.base.APIChain",
"langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain",
"langchain.chains.hyde.base.HypotheticalDocumentEmbedder",
"langchain.chains.pal.base.PALChain",
"langchain.utilities.loading.try_load_from_hub",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.llm_requests.LLMRequestsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.chains.combine_documents.refine.RefineDocumentsChain",
"langchain.chains.vector_db_qa.base.VectorDBQA",
"langchain.prompts.loading.load_prompt_from_config",
"langchain.chains.llm_bash.base.LLMBashChain",
"langchain.chains.llm_math.base.LLMMathChain",
"langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain",
"langchain.chains.sql_database.base.SQLDatabaseChain",
"langchain.llms.loading.load_llm_from_config",
"langchain.chains.llm_checker.base.LLMCheckerChain"
] | [((2165, 2207), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (2173, 2207), False, 'from langchain.chains.llm import LLMChain\n'), ((2853, 2945), 'langchain.chains.hyde.base.HypotheticalDocumentEmbedder', 'HypotheticalDocumentEmbedder', ([], {'llm_chain': 'llm_chain', 'base_embeddings': 'embeddings'}), '(llm_chain=llm_chain, base_embeddings=\n embeddings, **config)\n', (2881, 2945), False, 'from langchain.chains.hyde.base import HypotheticalDocumentEmbedder\n'), ((3900, 3987), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'document_prompt': 'document_prompt'}), '(llm_chain=llm_chain, document_prompt=document_prompt,\n **config)\n', (3919, 3987), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((5552, 5711), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'combine_document_chain': 'combine_document_chain', 'collapse_document_chain': 'collapse_document_chain'}), '(llm_chain=llm_chain, combine_document_chain=\n combine_document_chain, collapse_document_chain=collapse_document_chain,\n **config)\n', (5575, 5711), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((6314, 6360), 'langchain.chains.llm_bash.base.LLMBashChain', 'LLMBashChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (6326, 6360), False, 'from langchain.chains.llm_bash.base import LLMBashChain\n'), ((8225, 8469), 'langchain.chains.llm_checker.base.LLMCheckerChain', 'LLMCheckerChain', ([], {'llm': 'llm', 'create_draft_answer_prompt': 'create_draft_answer_prompt', 'list_assertions_prompt': 'list_assertions_prompt', 'check_assertions_prompt': 'check_assertions_prompt', 'revised_answer_prompt': 'revised_answer_prompt'}), '(llm=llm, create_draft_answer_prompt=\n create_draft_answer_prompt, list_assertions_prompt=\n list_assertions_prompt, check_assertions_prompt=check_assertions_prompt,\n revised_answer_prompt=revised_answer_prompt, **config)\n', (8240, 8469), False, 'from langchain.chains.llm_checker.base import LLMCheckerChain\n'), ((9083, 9129), 'langchain.chains.llm_math.base.LLMMathChain', 'LLMMathChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (9095, 9129), False, 'from langchain.chains.llm_math.base import LLMMathChain\n'), ((9579, 9633), 'langchain.chains.combine_documents.map_rerank.MapRerankDocumentsChain', 'MapRerankDocumentsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (9602, 9633), False, 'from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain\n'), ((10285, 10327), 'langchain.chains.pal.base.PALChain', 'PALChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt, **config)\n', (10293, 10327), False, 'from langchain.chains.pal.base import PALChain\n'), ((11566, 11706), 'langchain.chains.combine_documents.refine.RefineDocumentsChain', 'RefineDocumentsChain', ([], {'initial_llm_chain': 'initial_llm_chain', 'refine_llm_chain': 'refine_llm_chain', 'document_prompt': 'document_prompt'}), '(initial_llm_chain=initial_llm_chain, refine_llm_chain=\n refine_llm_chain, document_prompt=document_prompt, **config)\n', (11586, 11706), False, 'from langchain.chains.combine_documents.refine import RefineDocumentsChain\n'), ((12349, 12426), 
'langchain.chains.qa_with_sources.base.QAWithSourcesChain', 'QAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain'}), '(combine_documents_chain=combine_documents_chain, **config)\n', (12367, 12426), False, 'from langchain.chains.qa_with_sources.base import QAWithSourcesChain\n'), ((13054, 13123), 'langchain.chains.sql_database.base.SQLDatabaseChain', 'SQLDatabaseChain', ([], {'database': 'database', 'llm': 'llm', 'prompt': 'prompt'}), '(database=database, llm=llm, prompt=prompt, **config)\n', (13070, 13123), False, 'from langchain.chains.sql_database.base import SQLDatabaseChain\n'), ((13905, 14019), 'langchain.chains.qa_with_sources.vector_db.VectorDBQAWithSourcesChain', 'VectorDBQAWithSourcesChain', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain,\n vectorstore=vectorstore, **config)\n', (13931, 14019), False, 'from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain\n'), ((14787, 14886), 'langchain.chains.vector_db_qa.base.VectorDBQA', 'VectorDBQA', ([], {'combine_documents_chain': 'combine_documents_chain', 'vectorstore': 'vectorstore'}), '(combine_documents_chain=combine_documents_chain, vectorstore=\n vectorstore, **config)\n', (14797, 14886), False, 'from langchain.chains.vector_db_qa.base import VectorDBQA\n'), ((16019, 16149), 'langchain.chains.api.base.APIChain', 'APIChain', ([], {'api_request_chain': 'api_request_chain', 'api_answer_chain': 'api_answer_chain', 'requests_wrapper': 'requests_wrapper'}), '(api_request_chain=api_request_chain, api_answer_chain=\n api_answer_chain, requests_wrapper=requests_wrapper, **config)\n', (16027, 16149), False, 'from langchain.chains.api.base import APIChain\n'), ((1653, 1685), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (1673, 1685), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((1936, 1974), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (1959, 1974), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((3604, 3642), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (3627, 3642), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((5892, 5924), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (5912, 5924), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6174, 6212), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (6197, 6212), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((6517, 6549), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (6537, 6549), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((6879, 6937), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['create_draft_answer_prompt_config'], {}), '(create_draft_answer_prompt_config)\n', (6902, 6937), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7285, 7339), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', 
(['list_assertions_prompt_config'], {}), '(list_assertions_prompt_config)\n', (7308, 7339), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((7635, 7690), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['check_assertions_prompt_config'], {}), '(check_assertions_prompt_config)\n', (7658, 7690), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8025, 8078), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['revised_answer_prompt_config'], {}), '(revised_answer_prompt_config)\n', (8048, 8078), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((8661, 8693), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (8681, 8693), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((8943, 8981), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (8966, 8981), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((9775, 9807), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (9795, 9807), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((10057, 10095), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (10080, 10095), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((11399, 11437), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (11422, 11437), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((12722, 12754), 'langchain.llms.loading.load_llm_from_config', 'load_llm_from_config', (['llm_config'], {}), '(llm_config)\n', (12742, 12754), False, 'from langchain.llms.loading import load_llm, load_llm_from_config\n'), ((13004, 13042), 'langchain.prompts.loading.load_prompt_from_config', 'load_prompt_from_config', (['prompt_config'], {}), '(prompt_config)\n', (13027, 13042), False, 'from langchain.prompts.loading import load_prompt, load_prompt_from_config\n'), ((16709, 16796), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain', 'requests_wrapper': 'requests_wrapper'}), '(llm_chain=llm_chain, requests_wrapper=requests_wrapper, **\n config)\n', (16725, 16796), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((16839, 16886), 'langchain.chains.llm_requests.LLMRequestsChain', 'LLMRequestsChain', ([], {'llm_chain': 'llm_chain'}), '(llm_chain=llm_chain, **config)\n', (16855, 16886), False, 'from langchain.chains.llm_requests import LLMRequestsChain\n'), ((18341, 18429), 'langchain.utilities.loading.try_load_from_hub', 'try_load_from_hub', (['path', '_load_chain_from_file', '"""chains"""', "{'json', 'yaml'}"], {}), "(path, _load_chain_from_file, 'chains', {'json', 'yaml'},\n **kwargs)\n", (18358, 18429), False, 'from langchain.utilities.loading import try_load_from_hub\n'), ((18724, 18734), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (18728, 18734), False, 'from pathlib import Path\n'), ((18899, 18911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18908, 18911), False, 'import json\n'), ((19011, 19028), 'yaml.safe_load', 'yaml.safe_load', 
(['f'], {}), '(f)\n', (19025, 19028), False, 'import yaml\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
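# --- usage sketch (illustrative only, not part of the wrapper above) ---
# The endpoint URL, token, and generation kwargs below are placeholders and assume a deployed
# text-generation Inference Endpoint; replace them with your own values before running.
if __name__ == "__main__":
    demo_llm = HuggingFaceEndpoint(
        endpoint_url="https://<your-endpoint>.endpoints.huggingface.cloud",
        task="text-generation",
        model_kwargs={"max_new_tokens": 128, "temperature": 0.7},
        huggingfacehub_api_token="hf_...",
    )
    print(demo_llm("Tell me a joke.", stop=["\n\n"]))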
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
"""Wrapper around HuggingFace APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
class HuggingFaceEndpoint(LLM):
"""Wrapper around HuggingFaceHub Inference Endpoints.
To use, you should have the ``huggingface_hub`` python package installed, and the
environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
Only supports `text-generation` and `text2text-generation` for now.
Example:
.. code-block:: python
from langchain.llms import HuggingFaceEndpoint
endpoint_url = (
"https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud"
)
hf = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
huggingfacehub_api_token="my-api-key"
)
"""
endpoint_url: str = ""
"""Endpoint URL to use."""
task: Optional[str] = None
"""Task to call the model with.
Should be a task that returns `generated_text` or `summary_text`."""
model_kwargs: Optional[dict] = None
"""Key word arguments to pass to the model."""
huggingfacehub_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(
values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
)
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(
endpoint="https://huggingface.co", # Can be a Private Hub endpoint.
token=huggingfacehub_api_token,
).whoami()
except Exception as e:
raise ValueError(
"Could not authenticate with huggingface_hub. "
"Please check your API token."
) from e
except ImportError:
raise ValueError(
"Could not import huggingface_hub python package. "
"Please install it with `pip install huggingface_hub`."
)
values["huggingfacehub_api_token"] = huggingfacehub_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url, "task": self.task},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = hf("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
# payload samples
parameter_payload = {"inputs": prompt, "parameters": _model_kwargs}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.huggingfacehub_api_token}",
"Content-Type": "application/json",
}
# send request
try:
response = requests.post(
self.endpoint_url, headers=headers, json=parameter_payload
)
except requests.exceptions.RequestException as e: # This is the correct syntax
raise ValueError(f"Error raised by inference endpoint: {e}")
generated_text = response.json()
if "error" in generated_text:
raise ValueError(
f"Error raised by inference API: {generated_text['error']}"
)
if self.task == "text-generation":
# Text generation return includes the starter text.
text = generated_text[0]["generated_text"][len(prompt) :]
elif self.task == "text2text-generation":
text = generated_text[0]["generated_text"]
elif self.task == "summarization":
text = generated_text[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {self.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1661, 1677), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1675, 1677), False, 'from pydantic import Extra, root_validator\n'), ((1848, 1936), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""huggingfacehub_api_token"""', '"""HUGGINGFACEHUB_API_TOKEN"""'], {}), "(values, 'huggingfacehub_api_token',\n 'HUGGINGFACEHUB_API_TOKEN')\n", (1868, 1936), False, 'from langchain.utils import get_from_dict_or_env\n'), ((4131, 4204), 'requests.post', 'requests.post', (['self.endpoint_url'], {'headers': 'headers', 'json': 'parameter_payload'}), '(self.endpoint_url, headers=headers, json=parameter_payload)\n', (4144, 4204), False, 'import requests\n'), ((5337, 5368), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (5356, 5368), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((2055, 2127), 'huggingface_hub.hf_api.HfApi', 'HfApi', ([], {'endpoint': '"""https://huggingface.co"""', 'token': 'huggingfacehub_api_token'}), "(endpoint='https://huggingface.co', token=huggingfacehub_api_token)\n", (2060, 2127), False, 'from huggingface_hub.hf_api import HfApi\n')] |
import os
from langchain.llms.bedrock import Bedrock
from langchain import PromptTemplate
def get_llm():
model_kwargs = {
"maxTokenCount": 1024,
"stopSequences": [],
"temperature": 0,
"topP": 0.9
}
llm = Bedrock(
# credentials_profile_name=os.environ.get("BWB_PROFILE_NAME"), #sets the profile name to use for AWS credentials (if not the default)
region_name=os.environ.get("BWB_REGION_NAME"), #sets the region name (if not the default)
endpoint_url=os.environ.get("BWB_ENDPOINT_URL"), #sets the endpoint URL (if necessary)
model_id="amazon.titan-tg1-large", #use the Anthropic Claude model
model_kwargs=model_kwargs) #configure the properties for Claude
return llm
def get_prompt(user_input, template):
prompt_template = PromptTemplate.from_template(template) #this will automatically identify the input variables for the template
prompt = prompt_template.format(user_input=user_input)
return prompt
def get_text_response(user_input, template): #text-to-text client function
llm = get_llm()
prompt = get_prompt(user_input, template)
return llm.predict(prompt) #return a response to the prompt
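# Minimal manual test (illustrative only): the template and question are made-up placeholders, and this
# assumes the BWB_* environment variables and Bedrock model access are already configured.
if __name__ == "__main__":
    demo_template = "Answer the question briefly.\n\nQuestion: {user_input}\n\nAnswer:"
    print(get_text_response("What is Amazon Bedrock?", demo_template))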
| [
"langchain.PromptTemplate.from_template"
] | [((844, 882), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (872, 882), False, 'from langchain import PromptTemplate\n'), ((437, 470), 'os.environ.get', 'os.environ.get', (['"""BWB_REGION_NAME"""'], {}), "('BWB_REGION_NAME')\n", (451, 470), False, 'import os\n'), ((536, 570), 'os.environ.get', 'os.environ.get', (['"""BWB_ENDPOINT_URL"""'], {}), "('BWB_ENDPOINT_URL')\n", (550, 570), False, 'import os\n')] |
from langchain import PromptTemplate, LLMChain
from langchain.document_loaders import TextLoader
from langchain.embeddings import LlamaCppEmbeddings
from langchain.llms import GPT4All
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.callbacks.base import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores.faiss import FAISS
# SCRIPT INFO:
#
# This script allows you to create a vectorstore from a file and query it with a question (hard coded).
#
# It shows how you could send questions to a GPT4All custom knowledge base and receive answers.
#
# If you want a chat style interface using a similar custom knowledge base, you can use the custom_chatbot.py script provided.
# Setup
gpt4all_path = './models/gpt4all-converted.bin'
llama_path = './models/ggml-model-q4_0.bin'
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
loader = TextLoader('./docs/shortened_sotu.txt')
embeddings = LlamaCppEmbeddings(model_path=llama_path)
llm = GPT4All(model=gpt4all_path, callback_manager=callback_manager, verbose=True)
# Split text
def split_chunks(sources):
chunks = []
splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=32)
for chunk in splitter.split_documents(sources):
chunks.append(chunk)
return chunks
def create_index(chunks):
texts = [doc.page_content for doc in chunks]
metadatas = [doc.metadata for doc in chunks]
search_index = FAISS.from_texts(texts, embeddings, metadatas=metadatas)
return search_index
def similarity_search(query, index):
matched_docs = index.similarity_search(query, k=4)
sources = []
for doc in matched_docs:
sources.append(
{
"page_content": doc.page_content,
"metadata": doc.metadata,
}
)
return matched_docs, sources
# Create Index
# docs = loader.load()
# chunks = split_chunks(docs)
# index = create_index(chunks)
# Save Index (use this to save the index for later use)
# Comment the line below after running once successfully (IMPORTANT)
# index.save_local("state_of_the_union_index")
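# First-run convenience sketch (an illustrative addition): build and save the index only when the
# folder loaded below is missing, so the load call also works on a fresh checkout.
import os
if not os.path.isdir("./full_sotu_index"):
    docs = loader.load()
    chunks = split_chunks(docs)
    index = create_index(chunks)
    index.save_local("./full_sotu_index")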
# Load Index (use this to load the index from a file, e.g. on your second run and beyond)
# Uncomment the line below after running once successfully (IMPORTANT)
index = FAISS.load_local("./full_sotu_index", embeddings)
# Set your query here manually
question = "Summarize the comments about NATO and its purpose."
matched_docs, sources = similarity_search(question, index)
template = """
Please use the following context to answer questions.
Context: {context}
---
Question: {question}
Answer: Let's think step by step."""
context = "\n".join([doc.page_content for doc in matched_docs])
prompt = PromptTemplate(template=template, input_variables=["context", "question"]).partial(context=context)
llm_chain = LLMChain(prompt=prompt, llm=llm)
print(llm_chain.run(question)) | [
"langchain.llms.GPT4All",
"langchain.PromptTemplate",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.vectorstores.faiss.FAISS.load_local",
"langchain.embeddings.LlamaCppEmbeddings",
"langchain.document_loaders.TextLoader",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.LLMChain",
"langchain.vectorstores.faiss.FAISS.from_texts"
] | [((968, 1007), 'langchain.document_loaders.TextLoader', 'TextLoader', (['"""./docs/shortened_sotu.txt"""'], {}), "('./docs/shortened_sotu.txt')\n", (978, 1007), False, 'from langchain.document_loaders import TextLoader\n'), ((1021, 1062), 'langchain.embeddings.LlamaCppEmbeddings', 'LlamaCppEmbeddings', ([], {'model_path': 'llama_path'}), '(model_path=llama_path)\n', (1039, 1062), False, 'from langchain.embeddings import LlamaCppEmbeddings\n'), ((1069, 1145), 'langchain.llms.GPT4All', 'GPT4All', ([], {'model': 'gpt4all_path', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), '(model=gpt4all_path, callback_manager=callback_manager, verbose=True)\n', (1076, 1145), False, 'from langchain.llms import GPT4All\n'), ((2399, 2448), 'langchain.vectorstores.faiss.FAISS.load_local', 'FAISS.load_local', (['"""./full_sotu_index"""', 'embeddings'], {}), "('./full_sotu_index', embeddings)\n", (2415, 2448), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((2941, 2973), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'llm'}), '(prompt=prompt, llm=llm)\n', (2949, 2973), False, 'from langchain import PromptTemplate, LLMChain\n'), ((1219, 1283), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(32)'}), '(chunk_size=256, chunk_overlap=32)\n', (1249, 1283), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1529, 1585), 'langchain.vectorstores.faiss.FAISS.from_texts', 'FAISS.from_texts', (['texts', 'embeddings'], {'metadatas': 'metadatas'}), '(texts, embeddings, metadatas=metadatas)\n', (1545, 1585), False, 'from langchain.vectorstores.faiss import FAISS\n'), ((924, 956), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (954, 956), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((2829, 2903), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['context', 'question']"}), "(template=template, input_variables=['context', 'question'])\n", (2843, 2903), False, 'from langchain import PromptTemplate, LLMChain\n')] |
import os
import requests
from langchain.tools import tool
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from sec_api import QueryApi
from unstructured.partition.html import partition_html
class SECTools():
@tool("Search 10-Q form")
def search_10q(data):
"""
Useful to search information from the latest 10-Q form for a
given stock.
The input to this tool should be a pipe (|) separated text of
    length two, representing the stock ticker you are interested in and the
    question you have about it.
For example, `AAPL|what was last quarter's revenue`.
"""
stock, ask = data.split("|")
queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
query = {
"query": {
"query_string": {
"query": f"ticker:{stock} AND formType:\"10-Q\""
}
},
"from": "0",
"size": "1",
"sort": [{ "filedAt": { "order": "desc" }}]
}
    filings = queryApi.get_filings(query)['filings']
    if len(filings) == 0:
      return "Sorry, I couldn't find any filing for this stock; check if the ticker is correct."
    link = filings[0]['linkToFilingDetails']
answer = SECTools.__embedding_search(link, ask)
return answer
@tool("Search 10-K form")
def search_10k(data):
"""
Useful to search information from the latest 10-K form for a
given stock.
The input to this tool should be a pipe (|) separated text of
    length two, representing the stock ticker you are interested in and the
    question you have about it.
For example, `AAPL|what was last year's revenue`.
"""
stock, ask = data.split("|")
queryApi = QueryApi(api_key=os.environ['SEC_API_API_KEY'])
query = {
"query": {
"query_string": {
"query": f"ticker:{stock} AND formType:\"10-K\""
}
},
"from": "0",
"size": "1",
"sort": [{ "filedAt": { "order": "desc" }}]
}
    filings = queryApi.get_filings(query)['filings']
    if len(filings) == 0:
      return "Sorry, I couldn't find any filing for this stock; check if the ticker is correct."
    link = filings[0]['linkToFilingDetails']
answer = SECTools.__embedding_search(link, ask)
return answer
def __embedding_search(url, ask):
text = SECTools.__download_form_html(url)
elements = partition_html(text=text)
content = "\n".join([str(el) for el in elements])
text_splitter = CharacterTextSplitter(
separator = "\n",
chunk_size = 1000,
chunk_overlap = 150,
length_function = len,
is_separator_regex = False,
)
docs = text_splitter.create_documents([content])
retriever = FAISS.from_documents(
docs, OpenAIEmbeddings()
).as_retriever()
answers = retriever.get_relevant_documents(ask, top_k=4)
answers = "\n\n".join([a.page_content for a in answers])
return answers
def __download_form_html(url):
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,pt-BR;q=0.8,pt;q=0.7',
'Cache-Control': 'max-age=0',
'Dnt': '1',
'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120"',
'Sec-Ch-Ua-Mobile': '?0',
'Sec-Ch-Ua-Platform': '"macOS"',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}
response = requests.get(url, headers=headers)
return response.text
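# Quick manual check (illustrative only): assumes SEC_API_API_KEY and OPENAI_API_KEY are set and that
# the decorated tools are invoked via .run() in the installed langchain version.
if __name__ == "__main__":
    print(SECTools.search_10q.run("AAPL|What was last quarter's revenue?"))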
| [
"langchain.tools.tool",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.embeddings.OpenAIEmbeddings"
] | [((327, 351), 'langchain.tools.tool', 'tool', (['"""Search 10-Q form"""'], {}), "('Search 10-Q form')\n", (331, 351), False, 'from langchain.tools import tool\n'), ((1325, 1349), 'langchain.tools.tool', 'tool', (['"""Search 10-K form"""'], {}), "('Search 10-K form')\n", (1329, 1349), False, 'from langchain.tools import tool\n'), ((748, 795), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (756, 795), False, 'from sec_api import QueryApi\n'), ((1742, 1789), 'sec_api.QueryApi', 'QueryApi', ([], {'api_key': "os.environ['SEC_API_API_KEY']"}), "(api_key=os.environ['SEC_API_API_KEY'])\n", (1750, 1789), False, 'from sec_api import QueryApi\n'), ((2413, 2438), 'unstructured.partition.html.partition_html', 'partition_html', ([], {'text': 'text'}), '(text=text)\n', (2427, 2438), False, 'from unstructured.partition.html import partition_html\n'), ((2513, 2637), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(150)', 'length_function': 'len', 'is_separator_regex': '(False)'}), "(separator='\\n', chunk_size=1000, chunk_overlap=150,\n length_function=len, is_separator_regex=False)\n", (2534, 2637), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3814, 3848), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (3826, 3848), False, 'import requests\n'), ((2795, 2813), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2811, 2813), False, 'from langchain.embeddings import OpenAIEmbeddings\n')] |
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_PROMPT_TEMPLATE = """You are GPT-3, and you can't do math.
You can do basic math, and your memorization abilities are impressive, but you can't do any complex calculations that a human could not do in their head. You also have an annoying tendency to just make up highly specific, but wrong, answers.
So we hooked you up to a Python 3 kernel, and now you can execute code. If anyone gives you a hard math problem, just use this format and we’ll take care of the rest:
Question: ${{Question with hard calculation.}}
```python
${{Code that prints what you need to know}}
```
```output
${{Output of your code}}
```
Answer: ${{Answer}}
Otherwise, use this simpler format:
Question: ${{Question without hard calculation}}
Answer: ${{Answer}}
Begin.
Question: What is 37593 * 67?
```python
print(37593 * 67)
```
```output
2518731
```
Answer: 2518731
Question: {question}
"""
PROMPT = PromptTemplate(input_variables=["question"], template=_PROMPT_TEMPLATE)
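# Quick sanity check of the template wiring (illustrative question only):
if __name__ == "__main__":
    print(PROMPT.format(question="What is 123 * 456?"))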
| [
"langchain.prompts.prompt.PromptTemplate"
] | [((957, 1028), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': '_PROMPT_TEMPLATE'}), "(input_variables=['question'], template=_PROMPT_TEMPLATE)\n", (971, 1028), False, 'from langchain.prompts.prompt import PromptTemplate\n')] |
import streamlit as st
from langchain.prompts import PromptTemplate
chat_template = PromptTemplate(
input_variables=['transcript','summary','chat_history','user_message', 'sentiment_report'],
template='''
    You are an AI chatbot intended to discuss the user's audio transcription.
    \nTRANSCRIPT: "{transcript}"
    \nTRANSCRIPT SUMMARY: "{summary}"
\nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"
\nCHAT HISTORY: {chat_history}
\nUSER MESSAGE: "{user_message}"
\nAI RESPONSE HERE:
'''
)
sentiment_prompt = PromptTemplate(
input_variables=['transcript','summary'],
template='''
Return a single word sentiment of either ['Positive','Negative' or 'Neutral'] from this transcript and summary.
After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.
\nTRANSCRIPT: {transcript}
\nTRANSCRIPT SUMMARY: {summary}
    \nSENTIMENT LABEL HERE ('Positive','Negative', or 'Neutral') <comma-separated> REPORT HERE:
'''
)
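# Formatting sketch (illustrative placeholder values only), showing how the PromptTemplates above are filled:
if __name__ == "__main__":
    print(sentiment_prompt.format(transcript="(transcript text)", summary="(summary text)"))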
fact_check_prompt = '''
Fact-check this transcript for factual or logical inaccuracies or inconsistencies
\nWrite a report on the factuality / logic of the transcript
\nTRANSCRIPT: {}
\nTRANSCRIPT SUMMARY: {}
\nAI FACT CHECK RESPONSE HERE:
    '''
| [
"langchain.prompts.PromptTemplate"
] | [((88, 562), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary', 'chat_history', 'user_message', 'sentiment_report']", 'template': '"""\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\', \'chat_history\',\n \'user_message\', \'sentiment_report\'], template=\n """\n You are an AI chatbot intended to discuss about the user\'s audio transcription.\n \nTRANSCRIPT: "{transcript}"\n \nTRANSCIRPT SUMMARY: "{summary}"\n \nTRANSCRIPT SENTIMENT REPORT: "{sentiment_report}"\n \nCHAT HISTORY: {chat_history}\n \nUSER MESSAGE: "{user_message}"\n \nAI RESPONSE HERE:\n """\n )\n', (102, 562), False, 'from langchain.prompts import PromptTemplate\n'), ((595, 1095), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['transcript', 'summary']", 'template': '"""\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """'}), '(input_variables=[\'transcript\', \'summary\'], template=\n """\n Return a single word sentiment of either [\'Positive\',\'Negative\' or \'Neutral\'] from this transcript and summary.\n After that single word sentiment, add a comma, then return a sentiment report, analyzing transcript sentiment.\n \nTRANSCRIPT: {transcript}\n \nTRANSCRIPT SUMMARY: {summary}\n \nSENTIMENT LABEL HERE (\'Positive\',\'Negative\', or \'Neutral\') <comma-seperated> REPORT HERE:\n """\n )\n', (609, 1095), False, 'from langchain.prompts import PromptTemplate\n')] |
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv
import os
from langchain.chains import SimpleSequentialChain
# Create a .env file in the root of your project and add your OpenAI API key to it
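# For example (hypothetical key), the .env file could contain a single line:
# openai_api_key=sk-xxxxxxxxxxxxxxxxxxxx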
# Load env files
load_dotenv()
openai_api_key = os.environ.get('openai_api_key')
# This is an LLMChain to generate company names given a company description.
llm = ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo")
# Create templates
template_name = """You are a company name generator. Based on a company description, it is your job to create a company name.
Company description: {company_description}
Company name:"""
prompt_template_name = PromptTemplate(input_variables=["company_description"], template=template_name)
# This is an LLMChain to generate company slogans given a company name and company description.
template_slogan = """You are a company slogan generator. Based on a company name, it is your job to create a company slogan.
Company name: {company_name}
Company slogan:"""
prompt_template_slogan = PromptTemplate(input_variables=["company_name"], template=template_slogan)
# Create chains
name_chain = LLMChain(llm=llm, prompt=prompt_template_name)
slogan_chain = LLMChain(llm=llm, prompt=prompt_template_slogan)
# This is the overall chain where we run these two chains in sequence.
overall_chain = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
slogan = overall_chain.run("We are a company that sells shoes.")
| [
"langchain.prompts.PromptTemplate",
"langchain.chains.SimpleSequentialChain",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.LLMChain"
] | [((321, 334), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (332, 334), False, 'from dotenv import load_dotenv\n'), ((352, 384), 'os.environ.get', 'os.environ.get', (['"""openai_api_key"""'], {}), "('openai_api_key')\n", (366, 384), False, 'import os\n'), ((469, 524), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0.7, model_name='gpt-3.5-turbo')\n", (479, 524), False, 'from langchain.chat_models import ChatOpenAI\n'), ((757, 836), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_description']", 'template': 'template_name'}), "(input_variables=['company_description'], template=template_name)\n", (771, 836), False, 'from langchain.prompts import PromptTemplate\n'), ((1137, 1211), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['company_name']", 'template': 'template_slogan'}), "(input_variables=['company_name'], template=template_slogan)\n", (1151, 1211), False, 'from langchain.prompts import PromptTemplate\n'), ((1242, 1288), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_name'}), '(llm=llm, prompt=prompt_template_name)\n', (1250, 1288), False, 'from langchain.chains import LLMChain\n'), ((1304, 1352), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template_slogan'}), '(llm=llm, prompt=prompt_template_slogan)\n', (1312, 1352), False, 'from langchain.chains import LLMChain\n'), ((1441, 1511), 'langchain.chains.SimpleSequentialChain', 'SimpleSequentialChain', ([], {'chains': '[name_chain, slogan_chain]', 'verbose': '(True)'}), '(chains=[name_chain, slogan_chain], verbose=True)\n', (1462, 1511), False, 'from langchain.chains import SimpleSequentialChain\n')] |
import os
import streamlit as st
from PyPDF2 import PdfReader, PdfWriter
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
def ChatPDF(text):
# st.write(text)
#split into chunks
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size = 1000,
chunk_overlap = 200,
length_function=len
)
chunks = text_splitter.split_text(text)
# st.write(chunks)
# creating embeddings
OPENAI_API_KEY = st.text_input("OPENAI API KEY", type = "password")
if OPENAI_API_KEY:
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
# st.write("Embedding Created")
# st.write(embeddings)
knowledge_base = FAISS.from_texts(chunks, embeddings)
st.write("Knowledge Base created ")
#show user input
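        # ask_question re-invokes itself after each answer so the user can keep asking
        # follow-up questions; the incrementing key keeps each st.text_input widget unique.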
def ask_question(i=0):
user_question = st.text_input("Ask a question about your PDF?",key = i)
if user_question:
docs = knowledge_base.similarity_search(user_question)
# st.write(docs)
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
chain = load_qa_chain(llm, chain_type="stuff")
with get_openai_callback() as cb:
response = chain.run(input_documents=docs, question=user_question)
print(cb)
st.write(response)
ask_question(i+1)
ask_question()
def main():
st.set_page_config(page_title="Ask ur PDF",
page_icon="📄")
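    # CSS intended to hide Streamlit's default menu, footer and header chrome.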
hide_st_style = """
<style>
            #MainMenu {visibility: hidden;}
footer {visibility: hidden;}
#header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
# st.write(st.set_page_config)
st.header("Ask your PDF 🤔💭")
#uploading file
pdf = st.file_uploader("Upload your PDF ", type="pdf")
# extract the text
if pdf is not None:
option = st.selectbox("What you want to do with PDF📜", [
"Meta Data📂",
"Extract Raw Text📄",
"Extract Links🔗",
"Extract Images🖼️",
"Make PDF password protected🔐",
"PDF Annotation📝",
"ChatPDF💬"
])
pdf_reader = PdfReader(pdf)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
if option == "Meta Data📂":
st.write(pdf_reader.metadata)
elif option == "Make PDF password protected🔐":
pswd = st.text_input("Enter yourpass word", type="password")
if pswd:
with st.spinner("Encrypting..."):
pdf_writer = PdfWriter()
for page_num in range(len(pdf_reader.pages)):
pdf_writer.add_page(pdf_reader.pages[page_num])
pdf_writer.encrypt(pswd)
with open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "wb") as f:
pdf_writer.write(f)
st.success("Encryption Successful!")
st.download_button(
label="Download Encrypted PDF",
data=open(f"{pdf.name.split('.')[0]}_encrypted.pdf", "rb").read(),
file_name=f"{pdf.name.split('.')[0]}_encrypted.pdf",
mime="application/octet-stream",
)
try:
os.remove(f"{pdf.name.split('.')[0]}_encrypted.pdf")
except: pass
elif option == "Extract Raw Text📄":
st.write(text)
elif option == "Extract Links🔗":
for page in pdf_reader.pages:
if "/Annots" in page:
for annot in page["/Annots"]:
subtype = annot.get_object()["/Subtype"]
if subtype == "/Link":
try:
st.write(annot.get_object()["/A"]["/URI"])
except: pass
elif option == "Extract Images🖼️":
for page in pdf_reader.pages:
try:
for img in page.images:
st.write(img.name)
st.image(img.data)
except: pass
elif option == "PDF Annotation📝":
for page in pdf_reader.pages:
if "/Annots" in page:
for annot in page["/Annots"]:
obj = annot.get_object()
st.write(obj)
st.write("***********")
annotation = {"subtype": obj["/Subtype"], "location": obj["/Rect"]}
st.write(annotation)
elif option == "ChatPDF💬":
ChatPDF(text)
if __name__ == "__main__":
main()
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.vectorstores.FAISS.from_texts",
"langchain.llms.OpenAI",
"langchain.callbacks.get_openai_callback",
"langchain.chains.question_answering.load_qa_chain",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((481, 579), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(1000)', 'chunk_overlap': '(200)', 'length_function': 'len'}), "(separator='\\n', chunk_size=1000, chunk_overlap=200,\n length_function=len)\n", (502, 579), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((745, 793), 'streamlit.text_input', 'st.text_input', (['"""OPENAI API KEY"""'], {'type': '"""password"""'}), "('OPENAI API KEY', type='password')\n", (758, 793), True, 'import streamlit as st\n'), ((1783, 1841), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Ask ur PDF"""', 'page_icon': '"""📄"""'}), "(page_title='Ask ur PDF', page_icon='📄')\n", (1801, 1841), True, 'import streamlit as st\n'), ((2081, 2131), 'streamlit.markdown', 'st.markdown', (['hide_st_style'], {'unsafe_allow_html': '(True)'}), '(hide_st_style, unsafe_allow_html=True)\n', (2092, 2131), True, 'import streamlit as st\n'), ((2175, 2203), 'streamlit.header', 'st.header', (['"""Ask your PDF 🤔💭"""'], {}), "('Ask your PDF 🤔💭')\n", (2184, 2203), True, 'import streamlit as st\n'), ((2242, 2290), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your PDF """'], {'type': '"""pdf"""'}), "('Upload your PDF ', type='pdf')\n", (2258, 2290), True, 'import streamlit as st\n'), ((842, 889), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (858, 889), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((989, 1025), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['chunks', 'embeddings'], {}), '(chunks, embeddings)\n', (1005, 1025), False, 'from langchain.vectorstores import FAISS\n'), ((1035, 1070), 'streamlit.write', 'st.write', (['"""Knowledge Base created """'], {}), "('Knowledge Base created ')\n", (1043, 1070), True, 'import streamlit as st\n'), ((2360, 2551), 'streamlit.selectbox', 'st.selectbox', (['"""What you want to do with PDF📜"""', "['Meta Data📂', 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬']"], {}), "('What you want to do with PDF📜', ['Meta Data📂',\n 'Extract Raw Text📄', 'Extract Links🔗', 'Extract Images🖼️',\n 'Make PDF password protected🔐', 'PDF Annotation📝', 'ChatPDF💬'])\n", (2372, 2551), True, 'import streamlit as st\n'), ((2672, 2686), 'PyPDF2.PdfReader', 'PdfReader', (['pdf'], {}), '(pdf)\n', (2681, 2686), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((1160, 1214), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF?"""'], {'key': 'i'}), "('Ask a question about your PDF?', key=i)\n", (1173, 1214), True, 'import streamlit as st\n'), ((2835, 2864), 'streamlit.write', 'st.write', (['pdf_reader.metadata'], {}), '(pdf_reader.metadata)\n', (2843, 2864), True, 'import streamlit as st\n'), ((1378, 1415), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': 'OPENAI_API_KEY'}), '(openai_api_key=OPENAI_API_KEY)\n', (1384, 1415), False, 'from langchain.llms import OpenAI\n'), ((1441, 1479), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (1454, 1479), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((1667, 1685), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (1675, 1685), True, 'import streamlit as st\n'), ((2941, 2994), 'streamlit.text_input', 
'st.text_input', (['"""Enter yourpass word"""'], {'type': '"""password"""'}), "('Enter yourpass word', type='password')\n", (2954, 2994), True, 'import streamlit as st\n'), ((1502, 1523), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (1521, 1523), False, 'from langchain.callbacks import get_openai_callback\n'), ((4062, 4076), 'streamlit.write', 'st.write', (['text'], {}), '(text)\n', (4070, 4076), True, 'import streamlit as st\n'), ((3039, 3066), 'streamlit.spinner', 'st.spinner', (['"""Encrypting..."""'], {}), "('Encrypting...')\n", (3049, 3066), True, 'import streamlit as st\n'), ((3102, 3113), 'PyPDF2.PdfWriter', 'PdfWriter', ([], {}), '()\n', (3111, 3113), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((3480, 3516), 'streamlit.success', 'st.success', (['"""Encryption Successful!"""'], {}), "('Encryption Successful!')\n", (3490, 3516), True, 'import streamlit as st\n'), ((4697, 4715), 'streamlit.write', 'st.write', (['img.name'], {}), '(img.name)\n', (4705, 4715), True, 'import streamlit as st\n'), ((4741, 4759), 'streamlit.image', 'st.image', (['img.data'], {}), '(img.data)\n', (4749, 4759), True, 'import streamlit as st\n'), ((5041, 5054), 'streamlit.write', 'st.write', (['obj'], {}), '(obj)\n', (5049, 5054), True, 'import streamlit as st\n'), ((5080, 5103), 'streamlit.write', 'st.write', (['"""***********"""'], {}), "('***********')\n", (5088, 5103), True, 'import streamlit as st\n'), ((5222, 5242), 'streamlit.write', 'st.write', (['annotation'], {}), '(annotation)\n', (5230, 5242), True, 'import streamlit as st\n')] |
"""Toolkit for the Wolfram Alpha API."""
from typing import List
from langchain.tools.base import BaseTool, BaseToolkit
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaToolkit(BaseToolkit):
"""Tool that adds the capability to interact with Wolfram Alpha."""
wolfram_alpha_appid: str
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
wrapper = WolframAlphaAPIWrapper(wolfram_alpha_appid=self.wolfram_alpha_appid)
return [
WolframAlphaQueryRun(
api_wrapper=wrapper,
)
]
| [
"langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper",
"langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun"
] | [((509, 577), 'langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'self.wolfram_alpha_appid'}), '(wolfram_alpha_appid=self.wolfram_alpha_appid)\n', (531, 577), False, 'from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\n'), ((607, 648), 'langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun', 'WolframAlphaQueryRun', ([], {'api_wrapper': 'wrapper'}), '(api_wrapper=wrapper)\n', (627, 648), False, 'from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun\n')] |
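# A minimal sketch (the Wolfram Alpha app id and OpenAI credentials are placeholders)
# of handing the toolkit's tools to a standard zero-shot agent.
from langchain.agents import AgentType, initialize_agent
from langchain.llms import OpenAI

toolkit = WolframAlphaToolkit(wolfram_alpha_appid="YOUR-WOLFRAM-APPID")
agent = initialize_agent(
    tools=toolkit.get_tools(),
    llm=OpenAI(temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
# agent.run("What is the square root of 1764?")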
"""Toolkit for the Wolfram Alpha API."""
from typing import List
from langchain.tools.base import BaseTool, BaseToolkit
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
class WolframAlphaToolkit(BaseToolkit):
"""Tool that adds the capability to interact with Wolfram Alpha."""
wolfram_alpha_appid: str
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
wrapper = WolframAlphaAPIWrapper(wolfram_alpha_appid=self.wolfram_alpha_appid)
return [
WolframAlphaQueryRun(
api_wrapper=wrapper,
)
]
| [
"langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper",
"langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun"
] | [((509, 577), 'langchain.utilities.wolfram_alpha.WolframAlphaAPIWrapper', 'WolframAlphaAPIWrapper', ([], {'wolfram_alpha_appid': 'self.wolfram_alpha_appid'}), '(wolfram_alpha_appid=self.wolfram_alpha_appid)\n', (531, 577), False, 'from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper\n'), ((607, 648), 'langchain.tools.wolfram_alpha.tool.WolframAlphaQueryRun', 'WolframAlphaQueryRun', ([], {'api_wrapper': 'wrapper'}), '(api_wrapper=wrapper)\n', (627, 648), False, 'from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun\n')] |
"""The function tools tht are actually implemented"""
import json
import subprocess
from langchain.agents.load_tools import load_tools
from langchain.tools import BaseTool
from langchain.utilities.bash import BashProcess
from toolemu.tools.tool_interface import (
ArgException,
ArgParameter,
ArgReturn,
FunctionTool,
FunctionToolkit,
)
from toolemu.utils.my_typing import *
from .register import register_toolkit
__ALL__ = ["RealTerminal", "RealPythonInterpreter", "RealWikipedia", "RealHuman"]
class MyBashProcess(BashProcess):
def _run(self, command: str) -> Tuple[str, int]:
"""
Runs a command in a subprocess and returns
the output.
Args:
command: The command to run
""" # noqa: E501
try:
output = (
subprocess.run(
command,
shell=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
.stdout.decode()
.strip()
)
except subprocess.CalledProcessError as error:
if self.return_err_output:
return error.stdout.decode().strip(), error.returncode
return str(error).strip(), error.returncode
if self.strip_newlines:
output = output.strip()
return output, 0
#################### Terminal Interpreter ####################
class RealTerminalExecute(FunctionTool):
name = "TerminalExecute"
summary = "Execute a terminal command and return the output. This command should follow proper syntax and be supported by the terminal environment."
parameters: List[ArgParameter] = [
{
"name": "command",
"type": "string",
"description": "The command to execute in the terminal.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "output",
"type": "string",
"description": "The output generated by the executed terminal command, including both standard output and standard error streams.",
},
{
"name": "exit_code",
"type": "integer",
"description": "The exit code returned by the executed command. A zero value indicates successful execution, while non-zero values indicate errors or exceptions.",
},
]
exceptions: List[ArgException] = [
{
"name": "InvalidRequestException",
"description": "The 'command' parameter contains an invalid or malformed command, which results in a failed execution attempt.",
}
]
_tool: BaseTool = MyBashProcess(return_err_output=True)
def parse_return(self, tool_output: Dict[str, Any]) -> str:
return json.dumps({"output": tool_output[0], "exit_code": tool_output[1]})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._run(tool_input["command"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._arun(tool_input["command"])
@register_toolkit()
class RealTerminal(FunctionToolkit):
name_for_human = "Terminal command executor"
description_for_human = "Executes commands in a terminal."
name_for_model = "Terminal"
description_for_model = "Executes commands in a terminal on the user's local system. Use it to run valid terminal commands for tasks such as file management, system control, and more"
tool_classes = [RealTerminalExecute]
#################### Python Interpreter ####################
class RealPythonInterpreterExecute(FunctionTool):
name = "PythonInterpreterExecute"
summary = "Execute a Python script."
parameters: List[ArgParameter] = [
{
"name": "script",
"type": "string",
"description": "The python script to execute.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "result",
"type": "string",
"description": "The printed output of the script.",
}
]
exceptions: List[ArgException] = []
_tool: BaseTool = load_tools(["python_repl"])[0]
def parse_return(self, tool_output: str) -> str:
return json.dumps({"result": tool_output})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._run(tool_input["script"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._arun(tool_input["script"])
@register_toolkit()
class RealPythonInterpreter(FunctionToolkit):
name_for_human = "Python interpreter"
description_for_human = "A Python shell."
name_for_model = "PythonInterpreter"
description_for_model = "A Python shell. Use it to execute python scripts. If you want to see the output of a value, you should print it out with `print(...)`."
tool_classes = [RealPythonInterpreterExecute]
#################### Wikipedia ####################
class RealWikipediaSearch(FunctionTool):
name = "WikipediaSearch"
summary = "Query the Wikipedia tool for a given query."
parameters: List[ArgParameter] = [
{
"name": "query",
"type": "string",
"description": "The query to search for.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "result",
"type": "string",
"description": "The summary of the Wikipedia article.",
}
]
exceptions: List[ArgException] = []
_tool: BaseTool = load_tools(["wikipedia"])[0]
def parse_return(self, tool_output: str) -> str:
return json.dumps({"result": tool_output})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._run(tool_input["query"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return self._tool._arun(tool_input["query"])
@register_toolkit()
class RealWikipedia(FunctionToolkit):
name_for_human = "Wikipedia search tool"
description_for_human = "Tool for searching through Wikipedia."
name_for_model = "Wikipedia"
description_for_model = "Tool for searching through Wikipedia. Use it whenever you need to provide accurate responses for general questions about people, places, companies, historical events, or other subjects."
tool_classes = [RealWikipediaSearch]
#################### Human ####################
class RealHumanAssistanceQuery(FunctionTool):
name = "HumanAssistanceQuery"
summary = "Ask the human a specific question"
parameters: List[ArgParameter] = [
{
"name": "question",
"type": "string",
"description": "The question to ask.",
"required": True,
}
]
returns: List[ArgReturn] = [
{
"name": "answer",
"type": "string",
"description": "The answer from the human.",
}
]
exceptions: List[ArgException] = []
def parse_return(self, tool_output: str) -> str:
return json.dumps({"answer": tool_output})
def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
print("\n" + tool_input["question"] + "\n")
return input(tool_input["question"])
def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]:
return NotImplementedError("Human tool does not support async")
@register_toolkit()
class RealHuman(FunctionToolkit):
name_for_human = "Human assistance"
description_for_human = "Seek human assistance or guidance."
name_for_model = "HumanAssistance"
description_for_model = "Seek human assistance or guidance. Use it when expert human or user input is necessary, e.g., when you need some human knowledge, user permission, user-specific information."
tool_classes = [RealHumanAssistanceQuery]
| [
"langchain.agents.load_tools.load_tools"
] | [((2863, 2930), 'json.dumps', 'json.dumps', (["{'output': tool_output[0], 'exit_code': tool_output[1]}"], {}), "({'output': tool_output[0], 'exit_code': tool_output[1]})\n", (2873, 2930), False, 'import json\n'), ((4269, 4296), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['python_repl']"], {}), "(['python_repl'])\n", (4279, 4296), False, 'from langchain.agents.load_tools import load_tools\n'), ((4369, 4404), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (4379, 4404), False, 'import json\n'), ((5711, 5736), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['wikipedia']"], {}), "(['wikipedia'])\n", (5721, 5736), False, 'from langchain.agents.load_tools import load_tools\n'), ((5809, 5844), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (5819, 5844), False, 'import json\n'), ((7232, 7267), 'json.dumps', 'json.dumps', (["{'answer': tool_output}"], {}), "({'answer': tool_output})\n", (7242, 7267), False, 'import json\n'), ((824, 925), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, check=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n', (838, 925), False, 'import subprocess\n')] |
from typing import List, Optional, Any, Dict
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
from pydantic import Extra, root_validator
from sam.gpt.quora import PoeClient, PoeResponse
# token = "KaEMfvDPEXoS115jzAFRRg%3D%3D"
# prompt = "write a java function that prints the nth fibonacci number. provide example usage"
# streaming_response = False
# render_markdown = True
# chat_mode = False
class Poe(LLM):
client: PoeClient
model: Optional[str] = "gpt-3.5-turbo"
custom_model: bool = False
token: str
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
token = get_from_dict_or_env(
values, "token", "POE_COOKIE"
)
values["client"] = PoeClient(token)
return values
class Config:
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
models = {
'sage': 'capybara',
'gpt-4': 'beaver',
'claude-v1.2': 'a2_2',
'claude-instant-v1.0': 'a2',
'gpt-3.5-turbo': 'chinchilla',
}
_model = models[self.model] if not self.custom_model else self.model
return {
"model": _model,
"token": self.token,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
return "poe"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
params = self._default_params
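        # Drain the streamed response; when the loop ends, `chunk` holds the final message payload.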
        for chunk in self.client.send_message(params["model"], prompt):
pass
response = PoeResponse(
{
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
                'model': params["model"],
'choices': [
{
'text': chunk['text'],
'index': 0,
'logprobs': None,
'finish_reason': 'stop',
}
],
'usage': {
'prompt_tokens': len(prompt),
'completion_tokens': len(chunk['text']),
'total_tokens': len(prompt) + len(chunk['text']),
},
}
)
text = response.completion.choices[0].text
return text
| [
"langchain.utils.get_from_dict_or_env"
] | [((573, 589), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (587, 589), False, 'from pydantic import Extra, root_validator\n'), ((663, 714), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""token"""', '"""POE_COOKIE"""'], {}), "(values, 'token', 'POE_COOKIE')\n", (683, 714), False, 'from langchain.utils import get_from_dict_or_env\n'), ((765, 781), 'sam.gpt.quora.PoeClient', 'PoeClient', (['token'], {}), '(token)\n', (774, 781), False, 'from sam.gpt.quora import PoeClient, PoeResponse\n')] |
from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGPTOutputParser,
)
from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import (
FINISH_NAME,
)
from langchain.schema import (
AIMessage,
BaseMessage,
Document,
HumanMessage,
SystemMessage,
)
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
class AutoGPT:
"""Agent class for interacting with Auto-GPT."""
def __init__(
self,
ai_name: str,
memory: VectorStoreRetriever,
chain: LLMChain,
output_parser: BaseAutoGPTOutputParser,
tools: List[BaseTool],
feedback_tool: Optional[HumanInputRun] = None,
):
self.ai_name = ai_name
self.memory = memory
self.full_message_history: List[BaseMessage] = []
self.next_action_count = 0
self.chain = chain
self.output_parser = output_parser
self.tools = tools
self.feedback_tool = feedback_tool
@classmethod
def from_llm_and_tools(
cls,
ai_name: str,
ai_role: str,
memory: VectorStoreRetriever,
tools: List[BaseTool],
llm: BaseChatModel,
human_in_the_loop: bool = False,
output_parser: Optional[BaseAutoGPTOutputParser] = None,
) -> AutoGPT:
prompt = AutoGPTPrompt(
ai_name=ai_name,
ai_role=ai_role,
tools=tools,
input_variables=["memory", "messages", "goals", "user_input"],
token_counter=llm.get_num_tokens,
)
human_feedback_tool = HumanInputRun() if human_in_the_loop else None
chain = LLMChain(llm=llm, prompt=prompt)
return cls(
ai_name,
memory,
chain,
output_parser or AutoGPTOutputParser(),
tools,
feedback_tool=human_feedback_tool,
)
def run(self, goals: List[str]) -> str:
user_input = (
"Determine which next command to use, "
"and respond using the format specified above:"
)
# Interaction Loop
loop_count = 0
while True:
            # Count interaction loops (note: no continuous-run limit is actually enforced here)
loop_count += 1
# Send message to AI, get response
assistant_reply = self.chain.run(
goals=goals,
messages=self.full_message_history,
memory=self.memory,
user_input=user_input,
)
# Print Assistant thoughts
print(assistant_reply)
self.full_message_history.append(HumanMessage(content=user_input))
self.full_message_history.append(AIMessage(content=assistant_reply))
# Get command name and arguments
action = self.output_parser.parse(assistant_reply)
tools = {t.name: t for t in self.tools}
if action.name == FINISH_NAME:
return action.args["response"]
if action.name in tools:
tool = tools[action.name]
try:
observation = tool.run(action.args)
except ValidationError as e:
observation = (
f"Validation Error in args: {str(e)}, args: {action.args}"
)
except Exception as e:
observation = (
f"Error: {str(e)}, {type(e).__name__}, args: {action.args}"
)
result = f"Command {tool.name} returned: {observation}"
elif action.name == "ERROR":
result = f"Error: {action.args}. "
else:
result = (
f"Unknown command '{action.name}'. "
f"Please refer to the 'COMMANDS' list for available "
f"commands and only respond in the specified JSON format."
)
memory_to_add = (
f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
)
if self.feedback_tool is not None:
feedback = f"\n{self.feedback_tool.run('Input: ')}"
if feedback in {"q", "stop"}:
print("EXITING")
return "EXITING"
memory_to_add += feedback
self.memory.add_documents([Document(page_content=memory_to_add)])
self.full_message_history.append(SystemMessage(content=result))
| [
"langchain.chains.llm.LLMChain",
"langchain.tools.human.tool.HumanInputRun",
"langchain.schema.AIMessage",
"langchain.schema.HumanMessage",
"langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt",
"langchain.schema.Document",
"langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser",
"langchain.schema.SystemMessage"
] | [((1753, 1918), 'langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt', 'AutoGPTPrompt', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'tools': 'tools', 'input_variables': "['memory', 'messages', 'goals', 'user_input']", 'token_counter': 'llm.get_num_tokens'}), "(ai_name=ai_name, ai_role=ai_role, tools=tools,\n input_variables=['memory', 'messages', 'goals', 'user_input'],\n token_counter=llm.get_num_tokens)\n", (1766, 1918), False, 'from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt\n'), ((2075, 2107), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2083, 2107), False, 'from langchain.chains.llm import LLMChain\n'), ((2012, 2027), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2025, 2027), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2217, 2238), 'langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser', 'AutoGPTOutputParser', ([], {}), '()\n', (2236, 2238), False, 'from langchain.experimental.autonomous_agents.autogpt.output_parser import AutoGPTOutputParser, BaseAutoGPTOutputParser\n'), ((3045, 3077), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (3057, 3077), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((3124, 3158), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'assistant_reply'}), '(content=assistant_reply)\n', (3133, 3158), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4895, 4924), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'result'}), '(content=result)\n', (4908, 4924), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4811, 4847), 'langchain.schema.Document', 'Document', ([], {'page_content': 'memory_to_add'}), '(page_content=memory_to_add)\n', (4819, 4847), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n')] |
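# A minimal sketch (FAISS index, embedding model and goals are assumptions) of
# constructing the agent via from_llm_and_tools, mirroring the signature above.
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_texts(["placeholder"], embeddings)
agent = AutoGPT.from_llm_and_tools(
    ai_name="Tom",
    ai_role="Assistant",
    tools=[],
    llm=ChatOpenAI(temperature=0),
    memory=vectorstore.as_retriever(),
)
# agent.run(["write a weather report for SF today"])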
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
| [
"langchain.chains.llm.LLMChain",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.ReduceDocumentsChain",
"langchain.docstore.document.Document",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain"
] | [((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')] |
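# A minimal sketch (the summarization prompt and model choice are assumptions)
# of building the chain with from_params as defined above.
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter

summarize_prompt = PromptTemplate.from_template("Summarize the following text:\n\n{text}")
map_reduce = MapReduceChain.from_params(
    llm=ChatOpenAI(temperature=0),
    prompt=summarize_prompt,
    text_splitter=CharacterTextSplitter(),
)
# summary = map_reduce.run(input_text=some_long_document)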
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from langchain.chains import ReduceDocumentsChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain):
"""Map-reduce chain."""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine documents."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
text_splitter: TextSplitter,
callbacks: Callbacks = None,
combine_chain_kwargs: Optional[Mapping[str, Any]] = None,
reduce_chain_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs: Any,
) -> MapReduceChain:
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
stuff_chain = StuffDocumentsChain(
llm_chain=llm_chain,
callbacks=callbacks,
**(reduce_chain_kwargs if reduce_chain_kwargs else {}),
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=stuff_chain
)
combine_documents_chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
callbacks=callbacks,
**(combine_chain_kwargs if combine_chain_kwargs else {}),
)
return cls(
combine_documents_chain=combine_documents_chain,
text_splitter=text_splitter,
callbacks=callbacks,
**kwargs,
)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Split the larger text into smaller chunks.
doc_text = inputs.pop(self.input_key)
texts = self.text_splitter.split_text(doc_text)
docs = [Document(page_content=text) for text in texts]
_inputs: Dict[str, Any] = {
**inputs,
self.combine_documents_chain.input_key: docs,
}
outputs = self.combine_documents_chain.run(
_inputs, callbacks=_run_manager.get_child()
)
return {self.output_key: outputs}
| [
"langchain.chains.llm.LLMChain",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.ReduceDocumentsChain",
"langchain.docstore.document.Document",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain"
] | [((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')] |
from typing import Any, Dict, List, Optional, Sequence
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
class AlephAlpha(LLM):
"""Aleph Alpha large language models.
To use, you should have the ``aleph_alpha_client`` python package installed, and the
environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Parameters are explained more in depth here:
https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10
Example:
.. code-block:: python
from langchain.llms import AlephAlpha
aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
"""
client: Any #: :meta private:
model: Optional[str] = "luminous-base"
"""Model name to use."""
maximum_tokens: int = 64
"""The maximum number of tokens to be generated."""
temperature: float = 0.0
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int = 0
"""Number of most likely tokens to consider at each step."""
top_p: float = 0.0
"""Total probability mass of tokens to consider at each step."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency."""
repetition_penalties_include_prompt: Optional[bool] = False
"""Flag deciding whether presence penalty or frequency penalty are
updated from the prompt."""
use_multiplicative_presence_penalty: Optional[bool] = False
"""Flag deciding whether presence penalty is applied
multiplicatively (True) or additively (False)."""
penalty_bias: Optional[str] = None
"""Penalty bias for the completion."""
penalty_exceptions: Optional[List[str]] = None
"""List of strings that may be generated without penalty,
regardless of other penalty settings"""
penalty_exceptions_include_stop_sequences: Optional[bool] = None
"""Should stop_sequences be included in penalty_exceptions."""
best_of: Optional[int] = None
"""returns the one with the "best of" results
(highest log probability per token)
"""
n: int = 1
"""How many completions to generate for each prompt."""
logit_bias: Optional[Dict[int, float]] = None
"""The logit bias allows to influence the likelihood of generating tokens."""
log_probs: Optional[int] = None
"""Number of top log probabilities to be returned for each generated token."""
tokens: Optional[bool] = False
"""return tokens of completion."""
disable_optimizations: Optional[bool] = False
minimum_tokens: Optional[int] = 0
"""Generate at least this number of tokens."""
echo: bool = False
"""Echo the prompt in the completion."""
use_multiplicative_frequency_penalty: bool = False
sequence_penalty: float = 0.0
sequence_penalty_min_length: int = 2
use_multiplicative_sequence_penalty: bool = False
completion_bias_inclusion: Optional[Sequence[str]] = None
completion_bias_inclusion_first_token_only: bool = False
completion_bias_exclusion: Optional[Sequence[str]] = None
completion_bias_exclusion_first_token_only: bool = False
"""Only consider the first token for the completion_bias_exclusion."""
contextual_control_threshold: Optional[float] = None
"""If set to None, attention control parameters only apply to those tokens that have
explicitly been set in the request.
If set to a non-None value, control parameters are also applied to similar tokens.
"""
control_log_additive: Optional[bool] = True
"""True: apply control by adding the log(control_factor) to attention scores.
False: (attention_scores - - attention_scores.min(-1)) * control_factor
"""
repetition_penalties_include_completion: bool = True
"""Flag deciding whether presence penalty or frequency penalty
are updated from the completion."""
raw_completion: bool = False
"""Force the raw completion of the model to be returned."""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
# Client params
aleph_alpha_api_key: Optional[str] = None
"""API key for Aleph Alpha API."""
host: str = "https://api.aleph-alpha.com"
"""The hostname of the API host.
    The default one is "https://api.aleph-alpha.com"."""
hosting: Optional[str] = None
"""Determines in which datacenters the request may be processed.
You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
Not setting this value, or setting it to None, gives us maximal
flexibility in processing your request in our
own datacenters and on servers hosted with other providers.
Choose this option for maximal availability.
Setting it to "aleph-alpha" allows us to only process the
request in our own datacenters.
Choose this option for maximal data privacy."""
request_timeout_seconds: int = 305
"""Client timeout that will be set for HTTP requests in the
`requests` library's API calls.
Server will close all requests after 300 seconds with an internal server error."""
total_retries: int = 8
"""The number of retries made in case requests fail with certain retryable
status codes. If the last
retry fails a corresponding exception is raised. Note, that between retries
an exponential backoff
is applied, starting with 0.5 s after the first retry and doubling for
each retry made. So with the
default setting of 8 retries a total wait time of 63.5 s is added
between the retries."""
nice: bool = False
"""Setting this to True, will signal to the API that you intend to be
nice to other users
by de-prioritizing your request below concurrent ones."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
aleph_alpha_api_key = get_from_dict_or_env(
values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY"
)
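        # Import the client lazily so aleph_alpha_client is only needed when the LLM is used.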
try:
from aleph_alpha_client import Client
values["client"] = Client(
token=aleph_alpha_api_key,
host=values["host"],
hosting=values["hosting"],
request_timeout_seconds=values["request_timeout_seconds"],
total_retries=values["total_retries"],
nice=values["nice"],
)
except ImportError:
raise ImportError(
"Could not import aleph_alpha_client python package. "
"Please install it with `pip install aleph_alpha_client`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling the Aleph Alpha API."""
return {
"maximum_tokens": self.maximum_tokens,
"temperature": self.temperature,
"top_k": self.top_k,
"top_p": self.top_p,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"n": self.n,
"repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501
"use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501
"penalty_bias": self.penalty_bias,
"penalty_exceptions": self.penalty_exceptions,
"penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501
"best_of": self.best_of,
"logit_bias": self.logit_bias,
"log_probs": self.log_probs,
"tokens": self.tokens,
"disable_optimizations": self.disable_optimizations,
"minimum_tokens": self.minimum_tokens,
"echo": self.echo,
"use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501
"sequence_penalty": self.sequence_penalty,
"sequence_penalty_min_length": self.sequence_penalty_min_length,
"use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501
"completion_bias_inclusion": self.completion_bias_inclusion,
"completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501
"completion_bias_exclusion": self.completion_bias_exclusion,
"completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501
"contextual_control_threshold": self.contextual_control_threshold,
"control_log_additive": self.control_log_additive,
"repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501
"raw_completion": self.raw_completion,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "aleph_alpha"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Aleph Alpha's completion endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = aleph_alpha("Tell me a joke.")
"""
from aleph_alpha_client import CompletionRequest, Prompt
params = self._default_params
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
params["stop_sequences"] = self.stop_sequences
else:
params["stop_sequences"] = stop
params = {**params, **kwargs}
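        # Build the typed completion request expected by the Aleph Alpha client.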
request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
response = self.client.complete(model=self.model, request=request)
text = response.completions[0].completion
# If stop tokens are provided, Aleph Alpha's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop is not None or self.stop_sequences is not None:
text = enforce_stop_tokens(text, params["stop_sequences"])
return text
if __name__ == "__main__":
aa = AlephAlpha()
print(aa("How are you?"))
| [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.pydantic_v1.root_validator",
"langchain.utils.get_from_dict_or_env"
] | [((6229, 6245), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (6243, 6245), False, 'from langchain.pydantic_v1 import Extra, root_validator\n'), ((6411, 6485), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""aleph_alpha_api_key"""', '"""ALEPH_ALPHA_API_KEY"""'], {}), "(values, 'aleph_alpha_api_key', 'ALEPH_ALPHA_API_KEY')\n", (6431, 6485), False, 'from langchain.utils import get_from_dict_or_env\n'), ((6603, 6812), 'aleph_alpha_client.Client', 'Client', ([], {'token': 'aleph_alpha_api_key', 'host': "values['host']", 'hosting': "values['hosting']", 'request_timeout_seconds': "values['request_timeout_seconds']", 'total_retries': "values['total_retries']", 'nice': "values['nice']"}), "(token=aleph_alpha_api_key, host=values['host'], hosting=values[\n 'hosting'], request_timeout_seconds=values['request_timeout_seconds'],\n total_retries=values['total_retries'], nice=values['nice'])\n", (6609, 6812), False, 'from aleph_alpha_client import Client\n'), ((11210, 11261), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (11229, 11261), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((10810, 10834), 'aleph_alpha_client.Prompt.from_text', 'Prompt.from_text', (['prompt'], {}), '(prompt)\n', (10826, 10834), False, 'from aleph_alpha_client import CompletionRequest, Prompt\n')] |
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import VectorDBQA
from langchain.document_loaders import TextLoader
from typing import List
from langchain.schema import Document
import os
os.environ['OPENAI_API_KEY'] = "your-api-key"
class Genie:
def __init__(self, file_path: str):
self.file_path = file_path
self.loader = TextLoader(self.file_path)
self.documents = self.loader.load()
self.texts = self.text_split(self.documents)
self.vectordb = self.embeddings(self.texts)
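        # Build a retrieval QA chain; the "stuff" chain type concatenates retrieved chunks into a single prompt.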
self.genie = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=self.vectordb)
@staticmethod
    def text_split(documents: List[Document]):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
return texts
@staticmethod
def embeddings(texts: List[Document]):
embeddings = OpenAIEmbeddings()
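        # Embed the chunks and index them in a Chroma vector store.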
vectordb = Chroma.from_documents(texts, embeddings)
return vectordb
def ask(self, query: str):
return self.genie.run(query)
if __name__ == "__main__":
genie = Genie("example.txt")
    print(genie.ask("What is the weather like?")) | [
"langchain.vectorstores.Chroma.from_documents",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.document_loaders.TextLoader"
] | [((515, 541), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.file_path'], {}), '(self.file_path)\n', (525, 541), False, 'from langchain.document_loaders import TextLoader\n'), ((886, 950), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (916, 950), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1112, 1130), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1128, 1130), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1150, 1190), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1171, 1190), False, 'from langchain.vectorstores import Chroma\n'), ((743, 751), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (749, 751), False, 'from langchain.llms import OpenAI\n')] |
import logging
from pathlib import Path
from typing import List, Optional, Tuple
from dotenv import load_dotenv
load_dotenv()
from queue import Empty, Queue
from threading import Thread
import gradio as gr
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
from callback import QueueCallback
MODELS_NAMES = ["gpt-3.5-turbo", "gpt-4"]
DEFAULT_TEMPERATURE = 0.7
ChatHistory = List[str]
logging.basicConfig(
format="[%(asctime)s %(levelname)s]: %(message)s", level=logging.INFO
)
# load up our system prompt
default_system_prompt = Path("prompts/system.prompt").read_text()
# for the human, we will just inject the text
human_message_prompt_template = HumanMessagePromptTemplate.from_template("{text}")
def on_message_button_click(
chat: Optional[ChatOpenAI],
message: str,
chatbot_messages: ChatHistory,
messages: List[BaseMessage],
) -> Tuple[ChatOpenAI, str, ChatHistory, List[BaseMessage]]:
if chat is None:
# in the queue we will store our streamed tokens
queue = Queue()
# let's create our default chat
chat = ChatOpenAI(
model_name=MODELS_NAMES[0],
temperature=DEFAULT_TEMPERATURE,
streaming=True,
callbacks=([QueueCallback(queue)]),
)
else:
# hacky way to get the queue back
queue = chat.callbacks[0].queue
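    # Sentinel object: once it appears on the queue, generation is finished.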
job_done = object()
logging.info(f"Asking question to GPT, messages={messages}")
# let's add the messages to our stuff
messages.append(HumanMessage(content=message))
chatbot_messages.append((message, ""))
    # this is a little wrapper we need because we have to add the job_done sentinel
def task():
chat(messages)
queue.put(job_done)
# now let's start a thread and run the generation inside it
t = Thread(target=task)
t.start()
# this will hold the content as we generate
content = ""
# now, we read the next_token from queue and do what it has to be done
while True:
try:
next_token = queue.get(True, timeout=1)
if next_token is job_done:
break
content += next_token
chatbot_messages[-1] = (message, content)
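            # Yield partial state so Gradio can stream the reply as it is generated.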
yield chat, "", chatbot_messages, messages
except Empty:
continue
    # finally we can add our reply to messages
messages.append(AIMessage(content=content))
logging.debug(f"reply = {content}")
logging.info(f"Done!")
return chat, "", chatbot_messages, messages
def system_prompt_handler(value: str) -> str:
return value
def on_clear_button_click(system_prompt: str) -> Tuple[str, List, List]:
return "", [], [SystemMessage(content=system_prompt)]
def on_apply_settings_button_click(
system_prompt: str, model_name: str, temperature: float
):
logging.info(
f"Applying settings: model_name={model_name}, temperature={temperature}"
)
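    # Recreate the chat model with the chosen settings and a fresh streaming queue.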
chat = ChatOpenAI(
model_name=model_name,
temperature=temperature,
streaming=True,
callbacks=[QueueCallback(Queue())],
)
# don't forget to nuke our queue
chat.callbacks[0].queue.empty()
return chat, *on_clear_button_click(system_prompt)
# some css why not, "borrowed" from https://huggingface.co/spaces/ysharma/Gradio-demo-streaming/blob/main/app.py
with gr.Blocks(
css="""#col_container {width: 700px; margin-left: auto; margin-right: auto;}
#chatbot {height: 400px; overflow: auto;}"""
) as demo:
system_prompt = gr.State(default_system_prompt)
    # here we keep our state so multiple users can use the app at the same time!
messages = gr.State([SystemMessage(content=default_system_prompt)])
    # same thing for the chat: we want one chat per user so the callbacks stay unique
chat = gr.State(None)
with gr.Column(elem_id="col_container"):
gr.Markdown("# Welcome to GradioGPT! 🌟🚀")
gr.Markdown(
"An easy to use template. It comes with state and settings managment"
)
with gr.Column():
system_prompt_area = gr.TextArea(
default_system_prompt, lines=4, label="system prompt", interactive=True
)
# we store the value into the state to avoid re rendering of the area
system_prompt_area.input(
system_prompt_handler,
inputs=[system_prompt_area],
outputs=[system_prompt],
)
system_prompt_button = gr.Button("Set")
chatbot = gr.Chatbot()
with gr.Column():
message = gr.Textbox(label="chat input")
message.submit(
on_message_button_click,
[chat, message, chatbot, messages],
[chat, message, chatbot, messages],
queue=True,
)
message_button = gr.Button("Submit", variant="primary")
message_button.click(
on_message_button_click,
[chat, message, chatbot, messages],
[chat, message, chatbot, messages],
)
with gr.Row():
with gr.Column():
clear_button = gr.Button("Clear")
clear_button.click(
on_clear_button_click,
[system_prompt],
[message, chatbot, messages],
queue=False,
)
with gr.Accordion("Settings", open=False):
model_name = gr.Dropdown(
choices=MODELS_NAMES, value=MODELS_NAMES[0], label="model"
)
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
label="temperature",
interactive=True,
)
apply_settings_button = gr.Button("Apply")
apply_settings_button.click(
on_apply_settings_button_click,
[system_prompt, model_name, temperature],
[chat, message, chatbot, messages],
)
system_prompt_button.click(
on_apply_settings_button_click,
[system_prompt, model_name, temperature],
[chat, message, chatbot, messages],
)
demo.queue()
demo.launch()
| [
"langchain.schema.AIMessage",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.HumanMessage",
"langchain.schema.SystemMessage"
] | [((114, 127), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (125, 127), False, 'from dotenv import load_dotenv\n'), ((604, 698), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s %(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(asctime)s %(levelname)s]: %(message)s',\n level=logging.INFO)\n", (623, 698), False, 'import logging\n'), ((873, 923), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (913, 923), False, 'from langchain.prompts import HumanMessagePromptTemplate\n'), ((1596, 1656), 'logging.info', 'logging.info', (['f"""Asking question to GPT, messages={messages}"""'], {}), "(f'Asking question to GPT, messages={messages}')\n", (1608, 1656), False, 'import logging\n'), ((2004, 2023), 'threading.Thread', 'Thread', ([], {'target': 'task'}), '(target=task)\n', (2010, 2023), False, 'from threading import Thread\n'), ((2606, 2641), 'logging.debug', 'logging.debug', (['f"""reply = {content}"""'], {}), "(f'reply = {content}')\n", (2619, 2641), False, 'import logging\n'), ((2646, 2668), 'logging.info', 'logging.info', (['f"""Done!"""'], {}), "(f'Done!')\n", (2658, 2668), False, 'import logging\n'), ((3020, 3111), 'logging.info', 'logging.info', (['f"""Applying settings: model_name={model_name}, temperature={temperature}"""'], {}), "(\n f'Applying settings: model_name={model_name}, temperature={temperature}')\n", (3032, 3111), False, 'import logging\n'), ((3530, 3688), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""'}), '(css=\n """#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""\n )\n', (3539, 3688), True, 'import gradio as gr\n'), ((3714, 3745), 'gradio.State', 'gr.State', (['default_system_prompt'], {}), '(default_system_prompt)\n', (3722, 3745), True, 'import gradio as gr\n'), ((3997, 4011), 'gradio.State', 'gr.State', (['None'], {}), '(None)\n', (4005, 4011), True, 'import gradio as gr\n'), ((753, 782), 'pathlib.Path', 'Path', (['"""prompts/system.prompt"""'], {}), "('prompts/system.prompt')\n", (757, 782), False, 'from pathlib import Path\n'), ((1228, 1235), 'queue.Queue', 'Queue', ([], {}), '()\n', (1233, 1235), False, 'from queue import Empty, Queue\n'), ((1719, 1748), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'message'}), '(content=message)\n', (1731, 1748), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2574, 2600), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (2583, 2600), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4022, 4056), 'gradio.Column', 'gr.Column', ([], {'elem_id': '"""col_container"""'}), "(elem_id='col_container')\n", (4031, 4056), True, 'import gradio as gr\n'), ((4066, 4107), 'gradio.Markdown', 'gr.Markdown', (['"""# Welcome to GradioGPT! 🌟🚀"""'], {}), "('# Welcome to GradioGPT! 🌟🚀')\n", (4077, 4107), True, 'import gradio as gr\n'), ((4116, 4203), 'gradio.Markdown', 'gr.Markdown', (['"""An easy to use template. It comes with state and settings managment"""'], {}), "(\n 'An easy to use template. 
It comes with state and settings managment')\n", (4127, 4203), True, 'import gradio as gr\n'), ((4725, 4737), 'gradio.Chatbot', 'gr.Chatbot', ([], {}), '()\n', (4735, 4737), True, 'import gradio as gr\n'), ((2877, 2913), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (2890, 2913), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((3851, 3895), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'default_system_prompt'}), '(content=default_system_prompt)\n', (3864, 3895), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4234, 4245), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4243, 4245), True, 'import gradio as gr\n'), ((4280, 4368), 'gradio.TextArea', 'gr.TextArea', (['default_system_prompt'], {'lines': '(4)', 'label': '"""system prompt"""', 'interactive': '(True)'}), "(default_system_prompt, lines=4, label='system prompt',\n interactive=True)\n", (4291, 4368), True, 'import gradio as gr\n'), ((4689, 4705), 'gradio.Button', 'gr.Button', (['"""Set"""'], {}), "('Set')\n", (4698, 4705), True, 'import gradio as gr\n'), ((4751, 4762), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4760, 4762), True, 'import gradio as gr\n'), ((4786, 4816), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""chat input"""'}), "(label='chat input')\n", (4796, 4816), True, 'import gradio as gr\n'), ((5061, 5099), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {'variant': '"""primary"""'}), "('Submit', variant='primary')\n", (5070, 5099), True, 'import gradio as gr\n'), ((5306, 5314), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5312, 5314), True, 'import gradio as gr\n'), ((5333, 5344), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (5342, 5344), True, 'import gradio as gr\n'), ((5377, 5395), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (5386, 5395), True, 'import gradio as gr\n'), ((5630, 5666), 'gradio.Accordion', 'gr.Accordion', (['"""Settings"""'], {'open': '(False)'}), "('Settings', open=False)\n", (5642, 5666), True, 'import gradio as gr\n'), ((5697, 5768), 'gradio.Dropdown', 'gr.Dropdown', ([], {'choices': 'MODELS_NAMES', 'value': 'MODELS_NAMES[0]', 'label': '"""model"""'}), "(choices=MODELS_NAMES, value=MODELS_NAMES[0], label='model')\n", (5708, 5768), True, 'import gradio as gr\n'), ((5837, 5937), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.1)', 'label': '"""temperature"""', 'interactive': '(True)'}), "(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label=\n 'temperature', interactive=True)\n", (5846, 5937), True, 'import gradio as gr\n'), ((6112, 6130), 'gradio.Button', 'gr.Button', (['"""Apply"""'], {}), "('Apply')\n", (6121, 6130), True, 'import gradio as gr\n'), ((1440, 1460), 'callback.QueueCallback', 'QueueCallback', (['queue'], {}), '(queue)\n', (1453, 1460), False, 'from callback import QueueCallback\n'), ((3265, 3272), 'queue.Queue', 'Queue', ([], {}), '()\n', (3270, 3272), False, 'from queue import Empty, Queue\n')] |
import logging
from pathlib import Path
from typing import List, Optional, Tuple
from dotenv import load_dotenv
load_dotenv()
from queue import Empty, Queue
from threading import Thread
import gradio as gr
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
from callback import QueueCallback
MODELS_NAMES = ["gpt-3.5-turbo", "gpt-4"]
DEFAULT_TEMPERATURE = 0.7
ChatHistory = List[str]
logging.basicConfig(
format="[%(asctime)s %(levelname)s]: %(message)s", level=logging.INFO
)
# load up our system prompt
default_system_prompt = Path("prompts/system.prompt").read_text()
# for the human, we will just inject the text
human_message_prompt_template = HumanMessagePromptTemplate.from_template("{text}")
def on_message_button_click(
chat: Optional[ChatOpenAI],
message: str,
chatbot_messages: ChatHistory,
messages: List[BaseMessage],
) -> Tuple[ChatOpenAI, str, ChatHistory, List[BaseMessage]]:
if chat is None:
# in the queue we will store our streamed tokens
queue = Queue()
# let's create our default chat
chat = ChatOpenAI(
model_name=MODELS_NAMES[0],
temperature=DEFAULT_TEMPERATURE,
streaming=True,
callbacks=([QueueCallback(queue)]),
)
else:
# hacky way to get the queue back
queue = chat.callbacks[0].queue
job_done = object()
logging.info(f"Asking question to GPT, messages={messages}")
# let's add the messages to our stuff
messages.append(HumanMessage(content=message))
chatbot_messages.append((message, ""))
# this is a little wrapper we need cuz we have to add the job_done
def task():
chat(messages)
queue.put(job_done)
# now let's start a thread and run the generation inside it
t = Thread(target=task)
t.start()
# this will hold the content as we generate
content = ""
    # now, we read the next_token from the queue and do what has to be done
while True:
try:
next_token = queue.get(True, timeout=1)
if next_token is job_done:
break
content += next_token
chatbot_messages[-1] = (message, content)
yield chat, "", chatbot_messages, messages
except Empty:
continue
    # finally we can add our reply to messages
messages.append(AIMessage(content=content))
logging.debug(f"reply = {content}")
logging.info(f"Done!")
return chat, "", chatbot_messages, messages
def system_prompt_handler(value: str) -> str:
return value
def on_clear_button_click(system_prompt: str) -> Tuple[str, List, List]:
return "", [], [SystemMessage(content=system_prompt)]
def on_apply_settings_button_click(
system_prompt: str, model_name: str, temperature: float
):
logging.info(
f"Applying settings: model_name={model_name}, temperature={temperature}"
)
chat = ChatOpenAI(
model_name=model_name,
temperature=temperature,
streaming=True,
callbacks=[QueueCallback(Queue())],
)
# don't forget to nuke our queue
chat.callbacks[0].queue.empty()
return chat, *on_clear_button_click(system_prompt)
# some css why not, "borrowed" from https://huggingface.co/spaces/ysharma/Gradio-demo-streaming/blob/main/app.py
with gr.Blocks(
css="""#col_container {width: 700px; margin-left: auto; margin-right: auto;}
#chatbot {height: 400px; overflow: auto;}"""
) as demo:
system_prompt = gr.State(default_system_prompt)
    # here we keep our state so multiple users can use the app at the same time!
messages = gr.State([SystemMessage(content=default_system_prompt)])
    # same thing for the chat, we want one chat per user so callbacks are unique I guess
chat = gr.State(None)
with gr.Column(elem_id="col_container"):
gr.Markdown("# Welcome to GradioGPT! 🌟🚀")
gr.Markdown(
"An easy to use template. It comes with state and settings managment"
)
with gr.Column():
system_prompt_area = gr.TextArea(
default_system_prompt, lines=4, label="system prompt", interactive=True
)
# we store the value into the state to avoid re rendering of the area
system_prompt_area.input(
system_prompt_handler,
inputs=[system_prompt_area],
outputs=[system_prompt],
)
system_prompt_button = gr.Button("Set")
chatbot = gr.Chatbot()
with gr.Column():
message = gr.Textbox(label="chat input")
message.submit(
on_message_button_click,
[chat, message, chatbot, messages],
[chat, message, chatbot, messages],
queue=True,
)
message_button = gr.Button("Submit", variant="primary")
message_button.click(
on_message_button_click,
[chat, message, chatbot, messages],
[chat, message, chatbot, messages],
)
with gr.Row():
with gr.Column():
clear_button = gr.Button("Clear")
clear_button.click(
on_clear_button_click,
[system_prompt],
[message, chatbot, messages],
queue=False,
)
with gr.Accordion("Settings", open=False):
model_name = gr.Dropdown(
choices=MODELS_NAMES, value=MODELS_NAMES[0], label="model"
)
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
value=0.7,
step=0.1,
label="temperature",
interactive=True,
)
apply_settings_button = gr.Button("Apply")
apply_settings_button.click(
on_apply_settings_button_click,
[system_prompt, model_name, temperature],
[chat, message, chatbot, messages],
)
system_prompt_button.click(
on_apply_settings_button_click,
[system_prompt, model_name, temperature],
[chat, message, chatbot, messages],
)
demo.queue()
demo.launch()
| [
"langchain.schema.AIMessage",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.HumanMessage",
"langchain.schema.SystemMessage"
] | [((114, 127), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (125, 127), False, 'from dotenv import load_dotenv\n'), ((604, 698), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s %(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(asctime)s %(levelname)s]: %(message)s',\n level=logging.INFO)\n", (623, 698), False, 'import logging\n'), ((873, 923), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (913, 923), False, 'from langchain.prompts import HumanMessagePromptTemplate\n'), ((1596, 1656), 'logging.info', 'logging.info', (['f"""Asking question to GPT, messages={messages}"""'], {}), "(f'Asking question to GPT, messages={messages}')\n", (1608, 1656), False, 'import logging\n'), ((2004, 2023), 'threading.Thread', 'Thread', ([], {'target': 'task'}), '(target=task)\n', (2010, 2023), False, 'from threading import Thread\n'), ((2606, 2641), 'logging.debug', 'logging.debug', (['f"""reply = {content}"""'], {}), "(f'reply = {content}')\n", (2619, 2641), False, 'import logging\n'), ((2646, 2668), 'logging.info', 'logging.info', (['f"""Done!"""'], {}), "(f'Done!')\n", (2658, 2668), False, 'import logging\n'), ((3020, 3111), 'logging.info', 'logging.info', (['f"""Applying settings: model_name={model_name}, temperature={temperature}"""'], {}), "(\n f'Applying settings: model_name={model_name}, temperature={temperature}')\n", (3032, 3111), False, 'import logging\n'), ((3530, 3688), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""'}), '(css=\n """#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""\n )\n', (3539, 3688), True, 'import gradio as gr\n'), ((3714, 3745), 'gradio.State', 'gr.State', (['default_system_prompt'], {}), '(default_system_prompt)\n', (3722, 3745), True, 'import gradio as gr\n'), ((3997, 4011), 'gradio.State', 'gr.State', (['None'], {}), '(None)\n', (4005, 4011), True, 'import gradio as gr\n'), ((753, 782), 'pathlib.Path', 'Path', (['"""prompts/system.prompt"""'], {}), "('prompts/system.prompt')\n", (757, 782), False, 'from pathlib import Path\n'), ((1228, 1235), 'queue.Queue', 'Queue', ([], {}), '()\n', (1233, 1235), False, 'from queue import Empty, Queue\n'), ((1719, 1748), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'message'}), '(content=message)\n', (1731, 1748), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2574, 2600), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (2583, 2600), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4022, 4056), 'gradio.Column', 'gr.Column', ([], {'elem_id': '"""col_container"""'}), "(elem_id='col_container')\n", (4031, 4056), True, 'import gradio as gr\n'), ((4066, 4107), 'gradio.Markdown', 'gr.Markdown', (['"""# Welcome to GradioGPT! 🌟🚀"""'], {}), "('# Welcome to GradioGPT! 🌟🚀')\n", (4077, 4107), True, 'import gradio as gr\n'), ((4116, 4203), 'gradio.Markdown', 'gr.Markdown', (['"""An easy to use template. It comes with state and settings managment"""'], {}), "(\n 'An easy to use template. 
It comes with state and settings managment')\n", (4127, 4203), True, 'import gradio as gr\n'), ((4725, 4737), 'gradio.Chatbot', 'gr.Chatbot', ([], {}), '()\n', (4735, 4737), True, 'import gradio as gr\n'), ((2877, 2913), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (2890, 2913), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((3851, 3895), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'default_system_prompt'}), '(content=default_system_prompt)\n', (3864, 3895), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4234, 4245), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4243, 4245), True, 'import gradio as gr\n'), ((4280, 4368), 'gradio.TextArea', 'gr.TextArea', (['default_system_prompt'], {'lines': '(4)', 'label': '"""system prompt"""', 'interactive': '(True)'}), "(default_system_prompt, lines=4, label='system prompt',\n interactive=True)\n", (4291, 4368), True, 'import gradio as gr\n'), ((4689, 4705), 'gradio.Button', 'gr.Button', (['"""Set"""'], {}), "('Set')\n", (4698, 4705), True, 'import gradio as gr\n'), ((4751, 4762), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4760, 4762), True, 'import gradio as gr\n'), ((4786, 4816), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""chat input"""'}), "(label='chat input')\n", (4796, 4816), True, 'import gradio as gr\n'), ((5061, 5099), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {'variant': '"""primary"""'}), "('Submit', variant='primary')\n", (5070, 5099), True, 'import gradio as gr\n'), ((5306, 5314), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5312, 5314), True, 'import gradio as gr\n'), ((5333, 5344), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (5342, 5344), True, 'import gradio as gr\n'), ((5377, 5395), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (5386, 5395), True, 'import gradio as gr\n'), ((5630, 5666), 'gradio.Accordion', 'gr.Accordion', (['"""Settings"""'], {'open': '(False)'}), "('Settings', open=False)\n", (5642, 5666), True, 'import gradio as gr\n'), ((5697, 5768), 'gradio.Dropdown', 'gr.Dropdown', ([], {'choices': 'MODELS_NAMES', 'value': 'MODELS_NAMES[0]', 'label': '"""model"""'}), "(choices=MODELS_NAMES, value=MODELS_NAMES[0], label='model')\n", (5708, 5768), True, 'import gradio as gr\n'), ((5837, 5937), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.1)', 'label': '"""temperature"""', 'interactive': '(True)'}), "(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label=\n 'temperature', interactive=True)\n", (5846, 5937), True, 'import gradio as gr\n'), ((6112, 6130), 'gradio.Button', 'gr.Button', (['"""Apply"""'], {}), "('Apply')\n", (6121, 6130), True, 'import gradio as gr\n'), ((1440, 1460), 'callback.QueueCallback', 'QueueCallback', (['queue'], {}), '(queue)\n', (1453, 1460), False, 'from callback import QueueCallback\n'), ((3265, 3272), 'queue.Queue', 'Queue', ([], {}), '()\n', (3270, 3272), False, 'from queue import Empty, Queue\n')] |
"""
View stage example selector.
| Copyright 2017-2023, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
import pickle
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
# pylint: disable=relative-beyond-top-level
from .utils import get_embedding_function, get_cache, hash_query
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXAMPLES_DIR = os.path.join(ROOT_DIR, "examples")
EXAMPLE_EMBEDDINGS_PATH = os.path.join(
EXAMPLES_DIR, "viewstage_embeddings.pkl"
)
VIEW_STAGE_EXAMPLES_PATH = os.path.join(EXAMPLES_DIR, "viewstage_examples.csv")
VIEW_STAGE_EXAMPLE_PROMPT = PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
)
def get_or_create_embeddings(queries):
if os.path.isfile(EXAMPLE_EMBEDDINGS_PATH):
with open(EXAMPLE_EMBEDDINGS_PATH, "rb") as f:
example_embeddings = pickle.load(f)
else:
example_embeddings = {}
query_hashes = []
new_hashes = []
new_queries = []
for query in queries:
key = hash_query(query)
query_hashes.append(key)
if key not in example_embeddings:
new_hashes.append(key)
new_queries.append(query)
if new_queries:
print("Generating %d embeddings..." % len(new_queries))
model = get_embedding_function()
new_embeddings = model(new_queries)
for key, embedding in zip(new_hashes, new_embeddings):
example_embeddings[key] = embedding
if new_queries:
print("Saving embeddings to disk...")
with open(EXAMPLE_EMBEDDINGS_PATH, "wb") as f:
pickle.dump(example_embeddings, f)
return example_embeddings
def has_geo_field(sample_collection):
types = list(sample_collection.get_field_schema(flat=True).values())
types = [type(t) for t in types]
return any(["Geo" in t.__name__ for t in types])
def get_label_type(sample_collection, field_name):
sample = sample_collection.first()
field = sample.get_field(field_name)
field_type = str(type(field).__name__).lower()
field_type = field_type[:-1] if field_type.endswith("s") else field_type
return field_type
def _replace_run_keys(prompt, runs):
if "text_similarity" in runs:
prompt = prompt.replace("TEXT_SIM_KEY", runs["text_similarity"]["key"])
if "image_similarity" in runs:
prompt = prompt.replace(
"IMAGE_SIM_KEY", runs["image_similarity"]["key"]
)
if "evaluation" in runs:
prompt = prompt.replace("EVAL_KEY", runs["evaluation"]["key"])
if "uniqueness" in runs:
prompt = prompt.replace(
"UNIQUENESS_FIELD", runs["uniqueness"]["uniqueness_field"]
)
return prompt
def _count_empty_class_names(label_field):
return [list(class_name.values())[0] for class_name in label_field].count(
[]
)
def _reduce_label_fields(label_fields):
label_field_keys = list(label_fields.keys())
if len(label_field_keys) == 0:
return None, None
elif len(label_field_keys) > 0:
empty_counts = [
_count_empty_class_names(label_fields[key])
for key in label_field_keys
]
min_empty_count = min(empty_counts)
valid_keys = [
key
for key, count in zip(label_field_keys, empty_counts)
if count == min_empty_count
]
return {key: label_fields[key] for key in valid_keys}, min_empty_count
def _parse_runs_and_labels(runs, label_fields):
reduced_label_fields, count = _reduce_label_fields(label_fields.copy())
reduced_runs = runs.copy()
if count is not None and count > 0 and "text_similarity" in reduced_runs:
reduced_label_fields = None
return reduced_runs, reduced_label_fields
def _get_evaluation_type(sample_collection, eval_key):
eval_cls = sample_collection.get_evaluation_info(eval_key).config.cls
if "openimages" in eval_cls:
return "detection"
elif "coco" in eval_cls:
return "detection"
elif "activitynet" in eval_cls:
return "detection"
elif "classification" in eval_cls:
return "classification"
return None
def _load_examples():
examples = pd.read_csv(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines="skip")
examples["meta"] = examples["metadata"]
examples["contains_match"] = examples["stages"].str.contains("match\(")
examples["contains_filter_labels"] = examples["stages"].str.contains(
"filter_labels\("
)
examples["mfl"] = (
examples["contains_match"] | examples["contains_filter_labels"]
)
examples["hash"] = examples["query"].apply(lambda x: hash_query(x))
queries = examples["query"].tolist()
embeddings = get_or_create_embeddings(queries)
embeddings = {
key: np.array(embeddings[key]) for key in examples["hash"].tolist()
}
return examples, embeddings
def get_examples():
cache = get_cache()
keys = ("viewstage_examples", "viewstage_embeddings")
if keys[0] not in cache or keys[1] not in cache:
cache[keys[0]], cache[keys[1]] = _load_examples()
return cache[keys[0]], cache[keys[1]]
def _get_filtered_examples(sample_collection, runs, label_fields):
examples, embeddings = get_examples()
media_type = sample_collection.media_type
_filter = examples["media_type"].isin([media_type, "all"])
red_runs, red_label_fields = _parse_runs_and_labels(runs, label_fields)
geo = has_geo_field(sample_collection)
text_sim = "text_similarity" in red_runs
image_sim = "image_similarity" in red_runs
meta = "metadata" in red_runs
eval = "evaluation" in red_runs
if red_label_fields or eval:
if red_label_fields:
label_field_types = list(
set(
[
get_label_type(sample_collection, field)
for field in red_label_fields
]
)
)
else:
label_field_types = []
if eval:
eval_key = red_runs["evaluation"]["key"]
eval_types = [_get_evaluation_type(sample_collection, eval_key)]
else:
eval_types = []
label_types = list(set(label_field_types + eval_types + ["all"]))
_filter = _filter & examples["label_type"].isin(label_types)
## contains match() or filter_labels() in stages
mfl_cond = red_label_fields and not text_sim
conds = [geo, text_sim, image_sim, meta, eval, mfl_cond]
strs = ["geo", "text_sim", "image_sim", "meta", "eval", "mfl"]
for cond, cond_str in zip(conds, strs):
if not cond:
_filter = _filter & (examples[cond_str] == False)
filtered_examples = examples[_filter]
filtered_queries, filtered_stages, hashes = (
filtered_examples["query"].tolist(),
filtered_examples["stages"].tolist(),
filtered_examples["hash"].tolist(),
)
filtered_embeddings = [embeddings[key] for key in hashes]
return filtered_queries, filtered_stages, filtered_embeddings
def get_similar_examples(sample_collection, query, runs, label_fields):
ex_queries, ex_stages, ex_embeddings = _get_filtered_examples(
sample_collection, runs, label_fields
)
model = get_embedding_function()
query_embedding = np.array(model([query]))
if len(query_embedding.shape) == 2:
query_embedding = query_embedding[0]
dists = np.array([cosine(query_embedding, emb) for emb in ex_embeddings])
sorted_ix = np.argsort(dists).astype(int)
k = 20
similar_queries = [ex_queries[ix] for ix in sorted_ix[:k]]
similar_stages = [ex_stages[ix] for ix in sorted_ix[:k]]
return [
{"input": sq, "output": ss}
for sq, ss in zip(similar_queries, similar_stages)
]
def generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
):
examples = get_similar_examples(
sample_collection, query, runs, label_fields
)
example_prompt = VIEW_STAGE_EXAMPLE_PROMPT
return FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix="Generate code to produce the FiftyOne view stages for the following prompts:\n",
suffix="Input: {text}\nOutput:",
input_variables=["text"],
)
def generate_view_stage_examples_prompt(
sample_collection, query, runs, label_fields
):
similar_examples_prompt_template = (
generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
)
)
prompt = similar_examples_prompt_template.format(text=query)
return _replace_run_keys(prompt, runs)
| [
"langchain.prompts.PromptTemplate",
"langchain.prompts.FewShotPromptTemplate"
] | [((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'import os\n'), ((639, 691), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_examples.csv"""'], {}), "(EXAMPLES_DIR, 'viewstage_examples.csv')\n", (651, 691), False, 'import os\n'), ((721, 825), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'output']", 'template': '"""Input: {input}\nOutput: {output}"""'}), '(input_variables=[\'input\', \'output\'], template=\n """Input: {input}\nOutput: {output}""")\n', (735, 825), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((877, 916), 'os.path.isfile', 'os.path.isfile', (['EXAMPLE_EMBEDDINGS_PATH'], {}), '(EXAMPLE_EMBEDDINGS_PATH)\n', (891, 916), False, 'import os\n'), ((4348, 4406), 'pandas.read_csv', 'pd.read_csv', (['VIEW_STAGE_EXAMPLES_PATH'], {'on_bad_lines': '"""skip"""'}), "(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines='skip')\n", (4359, 4406), True, 'import pandas as pd\n'), ((8216, 8455), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': '"""Generate code to produce the FiftyOne view stages for the following prompts:\n"""', 'suffix': '"""Input: {text}\nOutput:"""', 'input_variables': "['text']"}), '(examples=examples, example_prompt=example_prompt,\n prefix=\n """Generate code to produce the FiftyOne view stages for the following prompts:\n"""\n , suffix="""Input: {text}\nOutput:""", input_variables=[\'text\'])\n', (8237, 8455), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((446, 471), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((4933, 4958), 'numpy.array', 'np.array', (['embeddings[key]'], {}), '(embeddings[key])\n', (4941, 4958), True, 'import numpy as np\n'), ((1006, 1020), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import pickle\n'), ((1751, 1785), 'pickle.dump', 'pickle.dump', (['example_embeddings', 'f'], {}), '(example_embeddings, f)\n', (1762, 1785), False, 'import pickle\n'), ((7604, 7632), 'scipy.spatial.distance.cosine', 'cosine', (['query_embedding', 'emb'], {}), '(query_embedding, emb)\n', (7610, 7632), False, 'from scipy.spatial.distance import cosine\n'), ((7677, 7694), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (7687, 7694), True, 'import numpy as np\n')] |
"""
View stage example selector.
| Copyright 2017-2023, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
import pickle
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
# pylint: disable=relative-beyond-top-level
from .utils import get_embedding_function, get_cache, hash_query
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXAMPLES_DIR = os.path.join(ROOT_DIR, "examples")
EXAMPLE_EMBEDDINGS_PATH = os.path.join(
EXAMPLES_DIR, "viewstage_embeddings.pkl"
)
VIEW_STAGE_EXAMPLES_PATH = os.path.join(EXAMPLES_DIR, "viewstage_examples.csv")
VIEW_STAGE_EXAMPLE_PROMPT = PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
)
def get_or_create_embeddings(queries):
if os.path.isfile(EXAMPLE_EMBEDDINGS_PATH):
with open(EXAMPLE_EMBEDDINGS_PATH, "rb") as f:
example_embeddings = pickle.load(f)
else:
example_embeddings = {}
query_hashes = []
new_hashes = []
new_queries = []
for query in queries:
key = hash_query(query)
query_hashes.append(key)
if key not in example_embeddings:
new_hashes.append(key)
new_queries.append(query)
if new_queries:
print("Generating %d embeddings..." % len(new_queries))
model = get_embedding_function()
new_embeddings = model(new_queries)
for key, embedding in zip(new_hashes, new_embeddings):
example_embeddings[key] = embedding
if new_queries:
print("Saving embeddings to disk...")
with open(EXAMPLE_EMBEDDINGS_PATH, "wb") as f:
pickle.dump(example_embeddings, f)
return example_embeddings
def has_geo_field(sample_collection):
types = list(sample_collection.get_field_schema(flat=True).values())
types = [type(t) for t in types]
return any(["Geo" in t.__name__ for t in types])
def get_label_type(sample_collection, field_name):
sample = sample_collection.first()
field = sample.get_field(field_name)
field_type = str(type(field).__name__).lower()
field_type = field_type[:-1] if field_type.endswith("s") else field_type
return field_type
def _replace_run_keys(prompt, runs):
if "text_similarity" in runs:
prompt = prompt.replace("TEXT_SIM_KEY", runs["text_similarity"]["key"])
if "image_similarity" in runs:
prompt = prompt.replace(
"IMAGE_SIM_KEY", runs["image_similarity"]["key"]
)
if "evaluation" in runs:
prompt = prompt.replace("EVAL_KEY", runs["evaluation"]["key"])
if "uniqueness" in runs:
prompt = prompt.replace(
"UNIQUENESS_FIELD", runs["uniqueness"]["uniqueness_field"]
)
return prompt
def _count_empty_class_names(label_field):
return [list(class_name.values())[0] for class_name in label_field].count(
[]
)
def _reduce_label_fields(label_fields):
label_field_keys = list(label_fields.keys())
if len(label_field_keys) == 0:
return None, None
elif len(label_field_keys) > 0:
empty_counts = [
_count_empty_class_names(label_fields[key])
for key in label_field_keys
]
min_empty_count = min(empty_counts)
valid_keys = [
key
for key, count in zip(label_field_keys, empty_counts)
if count == min_empty_count
]
return {key: label_fields[key] for key in valid_keys}, min_empty_count
def _parse_runs_and_labels(runs, label_fields):
reduced_label_fields, count = _reduce_label_fields(label_fields.copy())
reduced_runs = runs.copy()
if count is not None and count > 0 and "text_similarity" in reduced_runs:
reduced_label_fields = None
return reduced_runs, reduced_label_fields
def _get_evaluation_type(sample_collection, eval_key):
eval_cls = sample_collection.get_evaluation_info(eval_key).config.cls
if "openimages" in eval_cls:
return "detection"
elif "coco" in eval_cls:
return "detection"
elif "activitynet" in eval_cls:
return "detection"
elif "classification" in eval_cls:
return "classification"
return None
def _load_examples():
examples = pd.read_csv(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines="skip")
examples["meta"] = examples["metadata"]
examples["contains_match"] = examples["stages"].str.contains("match\(")
examples["contains_filter_labels"] = examples["stages"].str.contains(
"filter_labels\("
)
examples["mfl"] = (
examples["contains_match"] | examples["contains_filter_labels"]
)
examples["hash"] = examples["query"].apply(lambda x: hash_query(x))
queries = examples["query"].tolist()
embeddings = get_or_create_embeddings(queries)
embeddings = {
key: np.array(embeddings[key]) for key in examples["hash"].tolist()
}
return examples, embeddings
def get_examples():
cache = get_cache()
keys = ("viewstage_examples", "viewstage_embeddings")
if keys[0] not in cache or keys[1] not in cache:
cache[keys[0]], cache[keys[1]] = _load_examples()
return cache[keys[0]], cache[keys[1]]
def _get_filtered_examples(sample_collection, runs, label_fields):
examples, embeddings = get_examples()
media_type = sample_collection.media_type
_filter = examples["media_type"].isin([media_type, "all"])
red_runs, red_label_fields = _parse_runs_and_labels(runs, label_fields)
geo = has_geo_field(sample_collection)
text_sim = "text_similarity" in red_runs
image_sim = "image_similarity" in red_runs
meta = "metadata" in red_runs
eval = "evaluation" in red_runs
if red_label_fields or eval:
if red_label_fields:
label_field_types = list(
set(
[
get_label_type(sample_collection, field)
for field in red_label_fields
]
)
)
else:
label_field_types = []
if eval:
eval_key = red_runs["evaluation"]["key"]
eval_types = [_get_evaluation_type(sample_collection, eval_key)]
else:
eval_types = []
label_types = list(set(label_field_types + eval_types + ["all"]))
_filter = _filter & examples["label_type"].isin(label_types)
## contains match() or filter_labels() in stages
mfl_cond = red_label_fields and not text_sim
conds = [geo, text_sim, image_sim, meta, eval, mfl_cond]
strs = ["geo", "text_sim", "image_sim", "meta", "eval", "mfl"]
for cond, cond_str in zip(conds, strs):
if not cond:
_filter = _filter & (examples[cond_str] == False)
filtered_examples = examples[_filter]
filtered_queries, filtered_stages, hashes = (
filtered_examples["query"].tolist(),
filtered_examples["stages"].tolist(),
filtered_examples["hash"].tolist(),
)
filtered_embeddings = [embeddings[key] for key in hashes]
return filtered_queries, filtered_stages, filtered_embeddings
def get_similar_examples(sample_collection, query, runs, label_fields):
ex_queries, ex_stages, ex_embeddings = _get_filtered_examples(
sample_collection, runs, label_fields
)
model = get_embedding_function()
query_embedding = np.array(model([query]))
if len(query_embedding.shape) == 2:
query_embedding = query_embedding[0]
dists = np.array([cosine(query_embedding, emb) for emb in ex_embeddings])
sorted_ix = np.argsort(dists).astype(int)
k = 20
similar_queries = [ex_queries[ix] for ix in sorted_ix[:k]]
similar_stages = [ex_stages[ix] for ix in sorted_ix[:k]]
return [
{"input": sq, "output": ss}
for sq, ss in zip(similar_queries, similar_stages)
]
def generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
):
examples = get_similar_examples(
sample_collection, query, runs, label_fields
)
example_prompt = VIEW_STAGE_EXAMPLE_PROMPT
return FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix="Generate code to produce the FiftyOne view stages for the following prompts:\n",
suffix="Input: {text}\nOutput:",
input_variables=["text"],
)
def generate_view_stage_examples_prompt(
sample_collection, query, runs, label_fields
):
similar_examples_prompt_template = (
generate_view_stage_examples_prompt_template(
sample_collection, query, runs, label_fields
)
)
prompt = similar_examples_prompt_template.format(text=query)
return _replace_run_keys(prompt, runs)
| [
"langchain.prompts.PromptTemplate",
"langchain.prompts.FewShotPromptTemplate"
] | [((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'import os\n'), ((639, 691), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_examples.csv"""'], {}), "(EXAMPLES_DIR, 'viewstage_examples.csv')\n", (651, 691), False, 'import os\n'), ((721, 825), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'output']", 'template': '"""Input: {input}\nOutput: {output}"""'}), '(input_variables=[\'input\', \'output\'], template=\n """Input: {input}\nOutput: {output}""")\n', (735, 825), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((877, 916), 'os.path.isfile', 'os.path.isfile', (['EXAMPLE_EMBEDDINGS_PATH'], {}), '(EXAMPLE_EMBEDDINGS_PATH)\n', (891, 916), False, 'import os\n'), ((4348, 4406), 'pandas.read_csv', 'pd.read_csv', (['VIEW_STAGE_EXAMPLES_PATH'], {'on_bad_lines': '"""skip"""'}), "(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines='skip')\n", (4359, 4406), True, 'import pandas as pd\n'), ((8216, 8455), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': '"""Generate code to produce the FiftyOne view stages for the following prompts:\n"""', 'suffix': '"""Input: {text}\nOutput:"""', 'input_variables': "['text']"}), '(examples=examples, example_prompt=example_prompt,\n prefix=\n """Generate code to produce the FiftyOne view stages for the following prompts:\n"""\n , suffix="""Input: {text}\nOutput:""", input_variables=[\'text\'])\n', (8237, 8455), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((446, 471), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((4933, 4958), 'numpy.array', 'np.array', (['embeddings[key]'], {}), '(embeddings[key])\n', (4941, 4958), True, 'import numpy as np\n'), ((1006, 1020), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import pickle\n'), ((1751, 1785), 'pickle.dump', 'pickle.dump', (['example_embeddings', 'f'], {}), '(example_embeddings, f)\n', (1762, 1785), False, 'import pickle\n'), ((7604, 7632), 'scipy.spatial.distance.cosine', 'cosine', (['query_embedding', 'emb'], {}), '(query_embedding, emb)\n', (7610, 7632), False, 'from scipy.spatial.distance import cosine\n'), ((7677, 7694), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (7687, 7694), True, 'import numpy as np\n')] |
import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
from langchain.tools.gmail.utils import clean_email_body
class Resource(str, Enum):
"""Enumerator of Resources to search."""
THREADS = "threads"
MESSAGES = "messages"
class SearchArgsSchema(BaseModel):
"""Input for SearchGmailTool."""
# From https://support.google.com/mail/answer/7190?hl=en
query: str = Field(
...,
description="The Gmail query. Example filters include from:sender,"
" to:recipient, subject:subject, -filtered_term,"
" in:folder, is:important|read|starred, after:year/mo/date, "
"before:year/mo/date, label:label_name"
' "exact phrase".'
" Search newer/older than using d (day), m (month), and y (year): "
"newer_than:2d, older_than:1y."
" Attachments with extension example: filename:pdf. Multiple term"
" matching example: from:amy OR from:david.",
)
resource: Resource = Field(
default=Resource.MESSAGES,
description="Whether to search for threads or messages.",
)
max_results: int = Field(
default=10,
description="The maximum number of results to return.",
)
class GmailSearch(GmailBaseTool):
"""Tool that searches for messages or threads in Gmail."""
name: str = "search_gmail"
description: str = (
"Use this tool to search for email messages or threads."
" The input must be a valid Gmail query."
" The output is a JSON list of the requested resource."
)
args_schema: Type[SearchArgsSchema] = SearchArgsSchema
def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# Add the thread message snippets to the thread results
results = []
for thread in threads:
thread_id = thread["id"]
thread_data = (
self.api_resource.users()
.threads()
.get(userId="me", id=thread_id)
.execute()
)
messages = thread_data["messages"]
thread["messages"] = []
for message in messages:
snippet = message["snippet"]
thread["messages"].append({"snippet": snippet, "id": message["id"]})
results.append(thread)
return results
def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
results = []
for message in messages:
message_id = message["id"]
message_data = (
self.api_resource.users()
.messages()
.get(userId="me", format="raw", id=message_id)
.execute()
)
raw_message = base64.urlsafe_b64decode(message_data["raw"])
email_msg = email.message_from_bytes(raw_message)
subject = email_msg["Subject"]
sender = email_msg["From"]
message_body = email_msg.get_payload()
body = clean_email_body(message_body)
results.append(
{
"id": message["id"],
"threadId": message_data["threadId"],
"snippet": message_data["snippet"],
"body": body,
"subject": subject,
"sender": sender,
}
)
return results
def _run(
self,
query: str,
resource: Resource = Resource.MESSAGES,
max_results: int = 10,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Dict[str, Any]]:
"""Run the tool."""
results = (
self.api_resource.users()
.messages()
.list(userId="me", q=query, maxResults=max_results)
.execute()
.get(resource.value, [])
)
if resource == Resource.THREADS:
return self._parse_threads(results)
elif resource == Resource.MESSAGES:
return self._parse_messages(results)
else:
raise NotImplementedError(f"Resource of type {resource} not implemented.")
| [
"langchain.tools.gmail.utils.clean_email_body",
"langchain.pydantic_v1.Field"
] | [((606, 1054), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david."""'}), '(..., description=\n \'The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david.\'\n )\n', (611, 1054), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1181, 1276), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'Resource.MESSAGES', 'description': '"""Whether to search for threads or messages."""'}), "(default=Resource.MESSAGES, description=\n 'Whether to search for threads or messages.')\n", (1186, 1276), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1318, 1391), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of results to return."""'}), "(default=10, description='The maximum number of results to return.')\n", (1323, 1391), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((2960, 3005), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (["message_data['raw']"], {}), "(message_data['raw'])\n", (2984, 3005), False, 'import base64\n'), ((3031, 3068), 'email.message_from_bytes', 'email.message_from_bytes', (['raw_message'], {}), '(raw_message)\n', (3055, 3068), False, 'import email\n'), ((3224, 3254), 'langchain.tools.gmail.utils.clean_email_body', 'clean_email_body', (['message_body'], {}), '(message_body)\n', (3240, 3254), False, 'from langchain.tools.gmail.utils import clean_email_body\n')] |