Dataset columns: code (string, lengths 141 to 97.3k), apis (sequence, lengths 1 to 24), extract_api (string, lengths 113 to 214k)
"""LanceDB vector store.""" import logging from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.legacy.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.legacy.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.legacy.vector_stores.utils import ( DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict, ) _logger = logging.getLogger(__name__) def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """ The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". vector_column_name (str, optional): The vector column name in the table if different from default. Defaults to "vector", in keeping with lancedb convention. nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. 
""" stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", vector_column_name: str = "vector", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, doc_id_key: str = DEFAULT_DOC_ID_KEY, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.vector_column_name = vector_column_name self.nprobes = nprobes self.text_key = text_key self.doc_id_key = doc_id_key self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=False, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), "metadata": metadata, } data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search( query=query.query_embedding, vector_column_name=self.vector_column_name, ) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_pandas() nodes = [] for _, item in results.iterrows(): try: node = metadata_dict_to_node(item.metadata) node.embedding = list(item[self.vector_column_name]) except Exception: # deprecated legacy logic for backward compatibility _logger.debug( "Failed to parse Node metadata, fallback to legacy logic." ) if "metadata" in item: metadata, node_info, _relation = legacy_metadata_dict_to_node( item.metadata, text_key=self.text_key ) else: metadata, node_info = {}, {} node = TextNode( text=item[self.text_key] or "", id_=item.id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id=item[self.doc_id_key] ), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1377, 1400), True, 'import numpy as np\n'), ((3843, 3928), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (3864, 3928), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1281, 1305), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1287, 1305), True, 'import numpy as np\n'), ((6116, 6152), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (6137, 6152), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((6541, 6608), 'llama_index.legacy.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (6569, 6608), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7094, 7140), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (7109, 7140), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
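A minimal usage sketch for the LanceDBVectorStore code above, assuming the module lives at llama_index.legacy.vector_stores.lancedb; the URI, node text, and the toy 2-dimensional embedding are made up, and in practice the embedding would come from an embedding model:

# Hypothetical end-to-end sketch; values below are illustrative only.
from llama_index.legacy.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.legacy.vector_stores.lancedb import LanceDBVectorStore
from llama_index.legacy.vector_stores.types import VectorStoreQuery

store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")

node = TextNode(
    text="hello lancedb",
    embedding=[0.1, 0.2],  # toy vector; normally produced by an embedding model
    relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="doc-1")},
)
store.add([node])  # creates the "vectors" table on first use

result = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2], similarity_top_k=1))
print(result.ids, result.similarities)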
"""LanceDB vector store.""" import logging from typing import Any, List, Optional import numpy as np from pandas import DataFrame from llama_index.legacy.schema import ( BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode, ) from llama_index.legacy.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryResult, ) from llama_index.legacy.vector_stores.utils import ( DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict, ) _logger = logging.getLogger(__name__) def _to_lance_filter(standard_filters: MetadataFilters) -> Any: """Translate standard metadata filters to Lance specific spec.""" filters = [] for filter in standard_filters.legacy_filters(): if isinstance(filter.value, str): filters.append(filter.key + ' = "' + filter.value + '"') else: filters.append(filter.key + " = " + str(filter.value)) return " AND ".join(filters) def _to_llama_similarities(results: DataFrame) -> List[float]: keys = results.keys() normalized_similarities: np.ndarray if "score" in keys: normalized_similarities = np.exp(results["score"] - np.max(results["score"])) elif "_distance" in keys: normalized_similarities = np.exp(-results["_distance"]) else: normalized_similarities = np.linspace(1, 0, len(results)) return normalized_similarities.tolist() class LanceDBVectorStore(VectorStore): """ The LanceDB Vector Store. Stores text and embeddings in LanceDB. The vector store will open an existing LanceDB dataset or create the dataset if it does not exist. Args: uri (str, required): Location where LanceDB will store its files. table_name (str, optional): The table name where the embeddings will be stored. Defaults to "vectors". vector_column_name (str, optional): The vector column name in the table if different from default. Defaults to "vector", in keeping with lancedb convention. nprobes (int, optional): The number of probes used. A higher number makes search more accurate but also slower. Defaults to 20. refine_factor: (int, optional): Refine the results by reading extra elements and re-ranking them in memory. Defaults to None Raises: ImportError: Unable to import `lancedb`. Returns: LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and querying it. 
""" stores_text = True flat_metadata: bool = True def __init__( self, uri: str, table_name: str = "vectors", vector_column_name: str = "vector", nprobes: int = 20, refine_factor: Optional[int] = None, text_key: str = DEFAULT_TEXT_KEY, doc_id_key: str = DEFAULT_DOC_ID_KEY, **kwargs: Any, ) -> None: """Init params.""" import_err_msg = "`lancedb` package not found, please run `pip install lancedb`" try: import lancedb except ImportError: raise ImportError(import_err_msg) self.connection = lancedb.connect(uri) self.uri = uri self.table_name = table_name self.vector_column_name = vector_column_name self.nprobes = nprobes self.text_key = text_key self.doc_id_key = doc_id_key self.refine_factor = refine_factor @property def client(self) -> None: """Get client.""" return def add( self, nodes: List[BaseNode], **add_kwargs: Any, ) -> List[str]: data = [] ids = [] for node in nodes: metadata = node_to_metadata_dict( node, remove_text=False, flat_metadata=self.flat_metadata ) append_data = { "id": node.node_id, "doc_id": node.ref_doc_id, "vector": node.get_embedding(), "text": node.get_content(metadata_mode=MetadataMode.NONE), "metadata": metadata, } data.append(append_data) ids.append(node.node_id) if self.table_name in self.connection.table_names(): tbl = self.connection.open_table(self.table_name) tbl.add(data) else: self.connection.create_table(self.table_name, data) return ids def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """ Delete nodes using with ref_doc_id. Args: ref_doc_id (str): The doc_id of the document to delete. """ table = self.connection.open_table(self.table_name) table.delete('document_id = "' + ref_doc_id + '"') def query( self, query: VectorStoreQuery, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes.""" if query.filters is not None: if "where" in kwargs: raise ValueError( "Cannot specify filter via both query and kwargs. " "Use kwargs only for lancedb specific items that are " "not supported via the generic query interface." ) where = _to_lance_filter(query.filters) else: where = kwargs.pop("where", None) table = self.connection.open_table(self.table_name) lance_query = ( table.search( query=query.query_embedding, vector_column_name=self.vector_column_name, ) .limit(query.similarity_top_k) .where(where) .nprobes(self.nprobes) ) if self.refine_factor is not None: lance_query.refine_factor(self.refine_factor) results = lance_query.to_pandas() nodes = [] for _, item in results.iterrows(): try: node = metadata_dict_to_node(item.metadata) node.embedding = list(item[self.vector_column_name]) except Exception: # deprecated legacy logic for backward compatibility _logger.debug( "Failed to parse Node metadata, fallback to legacy logic." ) if "metadata" in item: metadata, node_info, _relation = legacy_metadata_dict_to_node( item.metadata, text_key=self.text_key ) else: metadata, node_info = {}, {} node = TextNode( text=item[self.text_key] or "", id_=item.id, metadata=metadata, start_char_idx=node_info.get("start", None), end_char_idx=node_info.get("end", None), relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id=item[self.doc_id_key] ), }, ) nodes.append(node) return VectorStoreQueryResult( nodes=nodes, similarities=_to_llama_similarities(results), ids=results["id"].tolist(), )
[ "lancedb.connect" ]
[((607, 634), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (624, 634), False, 'import logging\n'), ((3288, 3308), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (3303, 3308), False, 'import lancedb\n'), ((1371, 1400), 'numpy.exp', 'np.exp', (["(-results['_distance'])"], {}), "(-results['_distance'])\n", (1377, 1400), True, 'import numpy as np\n'), ((3843, 3928), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(False)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=False, flat_metadata=self.flat_metadata\n )\n', (3864, 3928), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((1281, 1305), 'numpy.max', 'np.max', (["results['score']"], {}), "(results['score'])\n", (1287, 1305), True, 'import numpy as np\n'), ((6116, 6152), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['item.metadata'], {}), '(item.metadata)\n', (6137, 6152), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((6541, 6608), 'llama_index.legacy.vector_stores.utils.legacy_metadata_dict_to_node', 'legacy_metadata_dict_to_node', (['item.metadata'], {'text_key': 'self.text_key'}), '(item.metadata, text_key=self.text_key)\n', (6569, 6608), False, 'from llama_index.legacy.vector_stores.utils import DEFAULT_DOC_ID_KEY, DEFAULT_TEXT_KEY, legacy_metadata_dict_to_node, metadata_dict_to_node, node_to_metadata_dict\n'), ((7094, 7140), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'item[self.doc_id_key]'}), '(node_id=item[self.doc_id_key])\n', (7109, 7140), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, NodeRelationship, RelatedNodeInfo, TextNode\n')]
from typing import List, Any
from dataclasses import dataclass

import lancedb
import pandas as pd

from autochain.tools.base import Tool
from autochain.models.base import BaseLanguageModel
from autochain.tools.internal_search.base_search_tool import BaseSearchTool


@dataclass
class LanceDBDoc:
    doc: str
    vector: List[float] = None


class LanceDBSeach(Tool, BaseSearchTool):
    """
    Use LanceDB as the internal search tool
    LanceDB is a vector database that supports vector search.

    Args:
        uri: the uri of the database. Default to "lancedb"
        table_name: the name of the table. Default to "table"
        metric: the metric used for vector search. Default to "cosine"
        encoder: the encoder used to encode the documents. Default to None
        docs: the documents to be indexed. Default to None
    """

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    docs: List[LanceDBDoc]
    uri: str = "lancedb"
    table_name: str = "table"
    metric: str = "cosine"
    encoder: BaseLanguageModel = None
    db: lancedb.db.DBConnection = None
    table: lancedb.table.Table = None

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.db = lancedb.connect(self.uri)

        if self.docs:
            self._encode_docs(self.docs)
            self._create_table(self.docs)

    def _create_table(self, docs: List[LanceDBDoc]) -> None:
        self.table = self.db.create_table(
            self.table_name, self._docs_to_dataframe(docs), mode="overwrite"
        )

    def _encode_docs(self, docs: List[LanceDBDoc]) -> None:
        for doc in docs:
            if not doc.vector:
                if not self.encoder:
                    raise ValueError("Encoder is not provided for encoding docs")
                doc.vector = self.encoder.encode([doc.doc]).embeddings[0]

    def _docs_to_dataframe(self, docs: List[LanceDBDoc]) -> pd.DataFrame:
        return pd.DataFrame(
            [{"doc": doc.doc, "vector": doc.vector} for doc in docs]
        )

    def _run(
        self,
        query: str,
        top_k: int = 2,
        *args: Any,
        **kwargs: Any,
    ) -> str:
        if self.table is None:
            return ""

        embeddings = self.encoder.encode([query]).embeddings[0]
        result = self.table.search(embeddings).limit(top_k).to_df()["doc"].to_list()

        return "\n".join([f"Doc {i}: {doc}" for i, doc in enumerate(result)])

    def add_docs(self, docs: List[LanceDBDoc], **kwargs):
        if not len(docs):
            return

        self._encode_docs(docs)
        self.table.add(self._docs_to_dataframe(docs)) if self.table else self._create_table(docs)

    def clear_index(self):
        if self.table_name in self.db.table_names():
            self.db.drop_table(self.table_name)
            self.table = None
[ "lancedb.connect" ]
[((1275, 1300), 'lancedb.connect', 'lancedb.connect', (['self.uri'], {}), '(self.uri)\n', (1290, 1300), False, 'import lancedb\n'), ((1984, 2054), 'pandas.DataFrame', 'pd.DataFrame', (["[{'doc': doc.doc, 'vector': doc.vector} for doc in docs]"], {}), "([{'doc': doc.doc, 'vector': doc.vector} for doc in docs])\n", (1996, 2054), True, 'import pandas as pd\n')]
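The LanceDBSeach tool above mostly wraps a handful of LanceDB calls; the autochain Tool base-class wiring is not shown in this row, so the sketch below exercises only the underlying calls made by _create_table and _run, with made-up documents and vectors:

# Standalone sketch of the LanceDB operations used by LanceDBSeach
# (table name, documents, and vectors are illustrative).
import lancedb
import pandas as pd

db = lancedb.connect("lancedb")  # same default uri as the tool
frame = pd.DataFrame(
    [
        {"doc": "refund policy", "vector": [0.9, 0.1]},
        {"doc": "shipping times", "vector": [0.2, 0.8]},
    ]
)
table = db.create_table("table", frame, mode="overwrite")

# mirrors _run: nearest documents for a (precomputed) query embedding
docs = table.search([0.85, 0.15]).limit(2).to_df()["doc"].to_list()
print("\n".join(f"Doc {i}: {doc}" for i, doc in enumerate(docs)))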
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2023/8/9 15:42
@Author  : unkn-wn (Leon Yee)
@File    : lancedb_store.py
"""
import lancedb
import shutil, os


class LanceStore:
    def __init__(self, name):
        db = lancedb.connect('./data/lancedb')
        self.db = db
        self.name = name
        self.table = None

    def search(self, query, n_results=2, metric="L2", nprobes=20, **kwargs):
        # This assumes query is a vector embedding
        # kwargs can be used for optional filtering
        # .select - only searches the specified columns
        # .where - SQL syntax filtering for metadata (e.g. where("price > 100"))
        # .metric - specifies the distance metric to use
        # .nprobes - higher values will yield better recall (more likely to find vectors if they exist) at the expense of latency.
        if self.table == None:
            raise Exception("Table not created yet, please add data first.")

        results = self.table \
            .search(query) \
            .limit(n_results) \
            .select(kwargs.get('select')) \
            .where(kwargs.get('where')) \
            .metric(metric) \
            .nprobes(nprobes) \
            .to_df()
        return results

    def persist(self):
        raise NotImplementedError

    def write(self, data, metadatas, ids):
        # This function is similar to add(), but it's for more generalized updates
        # "data" is the list of embeddings
        # Inserts into table by expanding metadatas into a dataframe: [{'vector', 'id', 'meta', 'meta2'}, ...]
        documents = []
        for i in range(len(data)):
            row = {
                'vector': data[i],
                'id': ids[i]
            }
            row.update(metadatas[i])
            documents.append(row)

        if self.table != None:
            self.table.add(documents)
        else:
            self.table = self.db.create_table(self.name, documents)

    def add(self, data, metadata, _id):
        # This function is for adding individual documents
        # It assumes you're passing in a single vector embedding, metadata, and id
        row = {
            'vector': data,
            'id': _id
        }
        row.update(metadata)

        if self.table != None:
            self.table.add([row])
        else:
            self.table = self.db.create_table(self.name, [row])

    def delete(self, _id):
        # This function deletes a row by id.
        # LanceDB delete syntax uses SQL syntax, so you can use "in" or "="
        if self.table == None:
            raise Exception("Table not created yet, please add data first")

        if isinstance(_id, str):
            return self.table.delete(f"id = '{_id}'")
        else:
            return self.table.delete(f"id = {_id}")

    def drop(self, name):
        # This function drops a table, if it exists.
        path = os.path.join(self.db.uri, name + '.lance')
        if os.path.exists(path):
            shutil.rmtree(path)
[ "lancedb.connect" ]
[((234, 267), 'lancedb.connect', 'lancedb.connect', (['"""./data/lancedb"""'], {}), "('./data/lancedb')\n", (249, 267), False, 'import lancedb\n'), ((2866, 2908), 'os.path.join', 'os.path.join', (['self.db.uri', "(name + '.lance')"], {}), "(self.db.uri, name + '.lance')\n", (2878, 2908), False, 'import shutil, os\n'), ((2920, 2940), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2934, 2940), False, 'import shutil, os\n'), ((2954, 2973), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (2967, 2973), False, 'import shutil, os\n')]
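A short usage sketch of the LanceStore class above; the ids, metadata, and 2-dimensional vectors are made up, and the data lands under ./data/lancedb as hard-coded in __init__:

# Illustrative use of LanceStore with dummy embeddings.
store = LanceStore("demo")
store.write(
    data=[[0.1, 0.2], [0.3, 0.4]],
    metadatas=[{"source": "a.txt"}, {"source": "b.txt"}],
    ids=["doc-1", "doc-2"],
)
store.add([0.5, 0.6], {"source": "c.txt"}, "doc-3")

hits = store.search([0.1, 0.2], n_results=2)  # returns a pandas DataFrame
store.delete("doc-2")   # SQL-style filter on the id column
store.drop("demo")      # removes ./data/lancedb/demo.lance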
import pytest

from langchain_community.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


@pytest.mark.requires("lancedb")
def test_lancedb_with_connection() -> None:
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1", "text 2", "item 3"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    result = store.similarity_search("text 1")
    result_texts = [doc.page_content for doc in result]
    assert "text 1" in result_texts


@pytest.mark.requires("lancedb")
def test_lancedb_without_connection() -> None:
    embeddings = FakeEmbeddings()
    texts = ["text 1", "text 2", "item 3"]

    store = LanceDB(embedding=embeddings)
    store.add_texts(texts)
    result = store.similarity_search("text 1")
    result_texts = [doc.page_content for doc in result]
    assert "text 1" in result_texts


@pytest.mark.requires("lancedb")
def test_lancedb_add_texts() -> None:
    embeddings = FakeEmbeddings()

    store = LanceDB(embedding=embeddings)
    store.add_texts(["text 2"])
    result = store.similarity_search("text 2")
    result_texts = [doc.page_content for doc in result]
    assert "text 2" in result_texts
[ "lancedb.connect" ]
[((151, 182), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (171, 182), False, 'import pytest\n'), ((810, 841), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (830, 841), False, 'import pytest\n'), ((1178, 1209), 'pytest.mark.requires', 'pytest.mark.requires', (['"""lancedb"""'], {}), "('lancedb')\n", (1198, 1209), False, 'import pytest\n'), ((264, 280), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (278, 280), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((290, 321), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (305, 321), False, 'import lancedb\n'), ((641, 667), 'langchain_community.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (648, 667), False, 'from langchain_community.vectorstores import LanceDB\n'), ((906, 922), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (920, 922), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((979, 1008), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embeddings'}), '(embedding=embeddings)\n', (986, 1008), False, 'from langchain_community.vectorstores import LanceDB\n'), ((1265, 1281), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (1279, 1281), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((1295, 1324), 'langchain_community.vectorstores.LanceDB', 'LanceDB', ([], {'embedding': 'embeddings'}), '(embedding=embeddings)\n', (1302, 1324), False, 'from langchain_community.vectorstores import LanceDB\n')]
""" Unit test for retrieve_utils.py """ import pytest try: import chromadb from autogen.retrieve_utils import ( split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db, ) from autogen.token_count_utils import count_token except ImportError: skip = True else: skip = False import os try: from unstructured.partition.auto import partition HAS_UNSTRUCTURED = True except ImportError: HAS_UNSTRUCTURED = False test_dir = os.path.join(os.path.dirname(__file__), "test_files") expected_text = """AutoGen is an advanced tool designed to assist developers in harnessing the capabilities of Large Language Models (LLMs) for various applications. The primary purpose of AutoGen is to automate and simplify the process of building applications that leverage the power of LLMs, allowing for seamless integration, testing, and deployment.""" @pytest.mark.skipif(skip, reason="dependency is not installed") class TestRetrieveUtils: def test_split_text_to_chunks(self): long_text = "A" * 10000 chunks = split_text_to_chunks(long_text, max_tokens=1000) assert all(count_token(chunk) <= 1000 for chunk in chunks) def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self): with pytest.raises(AssertionError): split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode") def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split()) def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) def test_get_files_from_dir(self): files = get_files_from_dir(test_dir, recursive=False) assert all(os.path.isfile(file) for file in files) pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") files = get_files_from_dir([pdf_file_path, txt_file_path]) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, ) assert all(os.path.isfile(file) for file in files) files = get_files_from_dir( [ pdf_file_path, txt_file_path, os.path.join(test_dir, "..", "..", "website/docs"), "https://raw.githubusercontent.com/microsoft/autogen/main/README.md", ], recursive=True, types=["pdf", "txt"], ) assert all(os.path.isfile(file) for file in files) assert len(files) == 3 def test_is_url(self): assert is_url("https://www.example.com") assert not is_url("not_a_url") def test_create_vector_db_from_dir(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) assert client.get_collection("all-my-documents") def test_query_vector_db(self): db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): client = chromadb.PersistentClient(path=db_path) else: # If the database does not exist, create it first client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir(test_dir, client=client) results = 
query_vector_db(["autogen"], client=client) assert isinstance(results, dict) and any("autogen" in res[0].lower() for res in results.get("documents", [])) def test_custom_vector_db(self): try: import lancedb except ImportError: return from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent db_path = "/tmp/lancedb" def create_lancedb(): db = lancedb.connect(db_path) data = [ {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"}, {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"}, {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"}, {"vector": [0.5, 0.7], "id": 4, "documents": "This is a fourth test document"}, {"vector": [2.1, 1.3], "id": 5, "documents": "This is a fifth test document spark"}, {"vector": [5.1, 8.3], "id": 6, "documents": "This is a sixth test document"}, ] try: db.create_table("my_table", data) except OSError: pass class MyRetrieveUserProxyAgent(RetrieveUserProxyAgent): def query_vector_db( self, query_texts, n_results=10, search_string="", ): if query_texts: vector = [0.1, 0.3] db = lancedb.connect(db_path) table = db.open_table("my_table") query = table.search(vector).where(f"documents LIKE '%{search_string}%'").limit(n_results).to_df() return {"ids": [query["id"].tolist()], "documents": [query["documents"].tolist()]} def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = ""): results = self.query_vector_db( query_texts=[problem], n_results=n_results, search_string=search_string, ) self._results = results print("doc_ids: ", results["ids"]) ragragproxyagent = MyRetrieveUserProxyAgent( name="ragproxyagent", human_input_mode="NEVER", max_consecutive_auto_reply=2, retrieve_config={ "task": "qa", "chunk_token_size": 2000, "client": "__", "embedding_model": "all-mpnet-base-v2", }, ) create_lancedb() ragragproxyagent.retrieve_docs("This is a test document spark", n_results=10, search_string="spark") assert ragragproxyagent._results["ids"] == [[3, 1, 5]] def test_custom_text_split_function(self): def custom_text_split_function(text): return [text[: len(text) // 2], text[len(text) // 2 :]] db_path = "/tmp/test_retrieve_utils_chromadb.db" client = chromadb.PersistentClient(path=db_path) create_vector_db_from_dir( os.path.join(test_dir, "example.txt"), client=client, collection_name="mytestcollection", custom_text_split_function=custom_text_split_function, get_or_create=True, recursive=False, ) results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1) assert ( "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities" in results.get("documents")[0][0] ) def test_retrieve_utils(self): client = chromadb.PersistentClient(path="/tmp/chromadb") create_vector_db_from_dir( dir_path="./website/docs", client=client, collection_name="autogen-docs", custom_text_types=["txt", "md", "rtf", "rst"], get_or_create=True, ) results = query_vector_db( query_texts=[ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?", ], n_results=4, client=client, collection_name="autogen-docs", search_string="AutoGen", ) print(results["ids"][0]) assert len(results["ids"][0]) == 4 @pytest.mark.skipif( not HAS_UNSTRUCTURED, reason="do not run if unstructured is not installed", ) def test_unstructured(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") word_file_path = os.path.join(test_dir, "example.docx") chunks = 
split_files_to_chunks([pdf_file_path, txt_file_path, word_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks ) if __name__ == "__main__": pytest.main() db_path = "/tmp/test_retrieve_utils_chromadb.db" if os.path.exists(db_path): os.remove(db_path) # Delete the database file after tests are finished
[ "lancedb.connect" ]
[((1011, 1073), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip'], {'reason': '"""dependency is not installed"""'}), "(skip, reason='dependency is not installed')\n", (1029, 1073), False, 'import pytest\n'), ((609, 634), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (624, 634), False, 'import os\n'), ((8685, 8784), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_UNSTRUCTURED)'], {'reason': '"""do not run if unstructured is not installed"""'}), "(not HAS_UNSTRUCTURED, reason=\n 'do not run if unstructured is not installed')\n", (8703, 8784), False, 'import pytest\n'), ((9322, 9335), 'pytest.main', 'pytest.main', ([], {}), '()\n', (9333, 9335), False, 'import pytest\n'), ((9397, 9420), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (9411, 9420), False, 'import os\n'), ((1189, 1237), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (['long_text'], {'max_tokens': '(1000)'}), '(long_text, max_tokens=1000)\n', (1209, 1237), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((1564, 1601), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1576, 1601), False, 'import os\n'), ((1780, 1817), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (1792, 1817), False, 'import os\n'), ((1842, 1879), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (1854, 1879), False, 'import os\n'), ((1897, 1950), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (1918, 1950), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2185, 2230), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['test_dir'], {'recursive': '(False)'}), '(test_dir, recursive=False)\n', (2203, 2230), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((2314, 2351), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (2326, 2351), False, 'import os\n'), ((2376, 2413), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (2388, 2413), False, 'import os\n'), ((2430, 2480), 'autogen.retrieve_utils.get_files_from_dir', 'get_files_from_dir', (['[pdf_file_path, txt_file_path]'], {}), '([pdf_file_path, txt_file_path])\n', (2448, 2480), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3404, 3437), 'autogen.retrieve_utils.is_url', 'is_url', (['"""https://www.example.com"""'], {}), "('https://www.example.com')\n", (3410, 3437), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3592, 3615), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3606, 3615), False, 'import os\n'), ((3979, 4002), 
'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (3993, 4002), False, 'import os\n'), ((4273, 4316), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client'}), "(['autogen'], client=client)\n", (4288, 4316), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7313, 7352), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (7338, 7352), False, 'import chromadb\n'), ((7670, 7767), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', (["['autogen']"], {'client': 'client', 'collection_name': '"""mytestcollection"""', 'n_results': '(1)'}), "(['autogen'], client=client, collection_name=\n 'mytestcollection', n_results=1)\n", (7685, 7767), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((7992, 8039), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""/tmp/chromadb"""'}), "(path='/tmp/chromadb')\n", (8017, 8039), False, 'import chromadb\n'), ((8048, 8222), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', ([], {'dir_path': '"""./website/docs"""', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'custom_text_types': "['txt', 'md', 'rtf', 'rst']", 'get_or_create': '(True)'}), "(dir_path='./website/docs', client=client,\n collection_name='autogen-docs', custom_text_types=['txt', 'md', 'rtf',\n 'rst'], get_or_create=True)\n", (8073, 8222), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8304, 8514), 'autogen.retrieve_utils.query_vector_db', 'query_vector_db', ([], {'query_texts': "['How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ]", 'n_results': '(4)', 'client': 'client', 'collection_name': '"""autogen-docs"""', 'search_string': '"""AutoGen"""'}), "(query_texts=[\n 'How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?'\n ], n_results=4, client=client, collection_name='autogen-docs',\n search_string='AutoGen')\n", (8319, 8514), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((8860, 8897), 'os.path.join', 'os.path.join', (['test_dir', '"""example.pdf"""'], {}), "(test_dir, 'example.pdf')\n", (8872, 8897), False, 'import os\n'), ((8922, 8959), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (8934, 8959), False, 'import os\n'), ((8985, 9023), 'os.path.join', 'os.path.join', (['test_dir', '"""example.docx"""'], {}), "(test_dir, 'example.docx')\n", (8997, 9023), False, 'import os\n'), ((9041, 9110), 'autogen.retrieve_utils.split_files_to_chunks', 'split_files_to_chunks', (['[pdf_file_path, txt_file_path, word_file_path]'], {}), '([pdf_file_path, txt_file_path, word_file_path])\n', (9062, 9110), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((9430, 9448), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', 
(9439, 9448), False, 'import os\n'), ((1389, 1418), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1402, 1418), False, 'import pytest\n'), ((1432, 1496), 'autogen.retrieve_utils.split_text_to_chunks', 'split_text_to_chunks', (["('A' * 10000)"], {'chunk_mode': '"""bogus_chunk_mode"""'}), "('A' * 10000, chunk_mode='bogus_chunk_mode')\n", (1452, 1496), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3457, 3476), 'autogen.retrieve_utils.is_url', 'is_url', (['"""not_a_url"""'], {}), "('not_a_url')\n", (3463, 3476), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((3638, 3677), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3663, 3677), False, 'import chromadb\n'), ((3713, 3752), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (3738, 3752), False, 'import chromadb\n'), ((3765, 3815), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (3790, 3815), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4025, 4064), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4050, 4064), False, 'import chromadb\n'), ((4151, 4190), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'db_path'}), '(path=db_path)\n', (4176, 4190), False, 'import chromadb\n'), ((4203, 4253), 'autogen.retrieve_utils.create_vector_db_from_dir', 'create_vector_db_from_dir', (['test_dir'], {'client': 'client'}), '(test_dir, client=client)\n', (4228, 4253), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n'), ((4737, 4761), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (4752, 4761), False, 'import lancedb\n'), ((7400, 7437), 'os.path.join', 'os.path.join', (['test_dir', '"""example.txt"""'], {}), "(test_dir, 'example.txt')\n", (7412, 7437), False, 'import os\n'), ((2250, 2270), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2264, 2270), False, 'import os\n'), ((2500, 2520), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2514, 2520), False, 'import os\n'), ((2668, 2718), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (2680, 2718), False, 'import os\n'), ((2878, 2898), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2892, 2898), False, 'import os\n'), ((3046, 3096), 'os.path.join', 'os.path.join', (['test_dir', '""".."""', '""".."""', '"""website/docs"""'], {}), "(test_dir, '..', '..', 'website/docs')\n", (3058, 3096), False, 'import os\n'), ((3290, 3310), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3304, 3310), False, 'import os\n'), ((5817, 5841), 'lancedb.connect', 'lancedb.connect', (['db_path'], {}), '(db_path)\n', (5832, 5841), False, 'import lancedb\n'), ((1257, 1275), 
'autogen.token_count_utils.count_token', 'count_token', (['chunk'], {}), '(chunk)\n', (1268, 1275), False, 'from autogen.token_count_utils import count_token\n'), ((1659, 1695), 'autogen.retrieve_utils.extract_text_from_pdf', 'extract_text_from_pdf', (['pdf_file_path'], {}), '(pdf_file_path)\n', (1680, 1695), False, 'from autogen.retrieve_utils import split_text_to_chunks, extract_text_from_pdf, split_files_to_chunks, get_files_from_dir, is_url, create_vector_db_from_dir, query_vector_db\n')]
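Stripped of the agent plumbing, the custom retrieval in test_custom_vector_db above reduces to a LanceDB table plus a SQL LIKE filter. A trimmed sketch of those raw calls, using mode="overwrite" instead of the test's try/except and only a subset of the rows:

# Raw LanceDB equivalent of create_lancedb + the overridden query_vector_db.
import lancedb

db = lancedb.connect("/tmp/lancedb")
data = [
    {"vector": [1.1, 1.2], "id": 1, "documents": "This is a test document spark"},
    {"vector": [0.2, 1.8], "id": 2, "documents": "This is another test document"},
    {"vector": [0.1, 0.3], "id": 3, "documents": "This is a third test document spark"},
]
table = db.create_table("my_table", data, mode="overwrite")

hits = (
    table.search([0.1, 0.3])
    .where("documents LIKE '%spark%'")
    .limit(10)
    .to_df()
)
print(hits["id"].tolist(), hits["documents"].tolist())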
from langchain.vectorstores import LanceDB

from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


def test_lancedb() -> None:
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1", "text 2", "item 3"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    result = store.similarity_search("text 1")
    result_texts = [doc.page_content for doc in result]
    assert "text 1" in result_texts


def test_lancedb_add_texts() -> None:
    import lancedb

    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    store.add_texts(["text 2"])
    result = store.similarity_search("text 2")
    result_texts = [doc.page_content for doc in result]
    assert "text 2" in result_texts
[ "lancedb.connect" ]
[((190, 206), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (204, 206), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((216, 247), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (231, 247), False, 'import lancedb\n'), ((567, 593), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (574, 593), False, 'from langchain.vectorstores import LanceDB\n'), ((810, 826), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (824, 826), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((836, 867), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (851, 867), False, 'import lancedb\n'), ((1167, 1193), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1174, 1193), False, 'from langchain.vectorstores import LanceDB\n')]
import lancedb

from langchain.vectorstores import LanceDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings


def test_lancedb() -> None:
    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1", "text 2", "item 3"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    result = store.similarity_search("text 1")
    result_texts = [doc.page_content for doc in result]
    assert "text 1" in result_texts


def test_lancedb_add_texts() -> None:
    embeddings = FakeEmbeddings()
    db = lancedb.connect("/tmp/lancedb")
    texts = ["text 1"]
    vectors = embeddings.embed_documents(texts)
    table = db.create_table(
        "my_table",
        data=[
            {"vector": vectors[idx], "id": text, "text": text}
            for idx, text in enumerate(texts)
        ],
        mode="overwrite",
    )
    store = LanceDB(table, embeddings)
    store.add_texts(["text 2"])
    result = store.similarity_search("text 2")
    result_texts = [doc.page_content for doc in result]
    assert "text 2" in result_texts
[ "lancedb.connect" ]
[((186, 202), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (200, 202), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((212, 243), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (227, 243), False, 'import lancedb\n'), ((563, 589), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (570, 589), False, 'from langchain.vectorstores import LanceDB\n'), ((786, 802), 'tests.integration_tests.vectorstores.fake_embeddings.FakeEmbeddings', 'FakeEmbeddings', ([], {}), '()\n', (800, 802), False, 'from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings\n'), ((812, 843), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (827, 843), False, 'import lancedb\n'), ((1143, 1169), 'langchain.vectorstores.LanceDB', 'LanceDB', (['table', 'embeddings'], {}), '(table, embeddings)\n', (1150, 1169), False, 'from langchain.vectorstores import LanceDB\n')]
import argparse from pprint import pprint import pandas as pd from mlx_lm import generate, load import lancedb.embeddings.gte TEMPLATE = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible using the context text provided. Your answers should only answer the question once and not have any text after the answer is done. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. CONTEXT: {context} Question: {question} Answer: """ if __name__ == "__main__": import lancedb parser = argparse.ArgumentParser(description="Query a vector DB") # Input parser.add_argument( "--question", help="The question that needs to be answered", default="what is flash attention?", ) # Input parser.add_argument( "--db_path", type=str, default="/tmp/lancedb", help="The path to read the vector DB", ) args = parser.parse_args() db = lancedb.connect(args.db_path) tbl = db.open_table("test") resp = tbl.search(args.question).limit(10).to_pandas() context = "\n".join(resp["text"].values) context = "\n".join(pd.Series(context.split("\n")).drop_duplicates()) prompt = TEMPLATE.format(context=context, question=args.question) model, tokenizer = load("mlx-community/NeuralBeagle14-7B-4bit-mlx") ans = generate(model, tokenizer, prompt=prompt, verbose=False, max_tokens=512) pprint(ans)
[ "lancedb.connect" ]
[((689, 745), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query a vector DB"""'}), "(description='Query a vector DB')\n", (712, 745), False, 'import argparse\n'), ((1112, 1141), 'lancedb.connect', 'lancedb.connect', (['args.db_path'], {}), '(args.db_path)\n', (1127, 1141), False, 'import lancedb\n'), ((1446, 1494), 'mlx_lm.load', 'load', (['"""mlx-community/NeuralBeagle14-7B-4bit-mlx"""'], {}), "('mlx-community/NeuralBeagle14-7B-4bit-mlx')\n", (1450, 1494), False, 'from mlx_lm import generate, load\n'), ((1505, 1577), 'mlx_lm.generate', 'generate', (['model', 'tokenizer'], {'prompt': 'prompt', 'verbose': '(False)', 'max_tokens': '(512)'}), '(model, tokenizer, prompt=prompt, verbose=False, max_tokens=512)\n', (1513, 1577), False, 'from mlx_lm import generate, load\n'), ((1583, 1594), 'pprint.pprint', 'pprint', (['ans'], {}), '(ans)\n', (1589, 1594), False, 'from pprint import pprint\n')]
import lancedb uri = "./.lancedb" db = lancedb.connect(uri) table = db.open_table("my_table") # table.delete("createAt = '1690358416394516300'") # This one failed unexpectedly. Column createat does not exist in the dataset table.delete("item = 'foo'") df = table.to_pandas() print(df)
[ "lancedb.connect" ]
[((40, 60), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (55, 60), False, 'import lancedb\n')]
import requests import time import numpy as np import pyarrow as pa import lancedb import logging import os from tqdm import tqdm from pathlib import Path from transformers import AutoConfig logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) TEI_URL= os.getenv("EMBED_URL") + "/embed" DIRPATH = "/usr/src/docs_dir" TABLE_NAME = "docs" config = AutoConfig.from_pretrained(os.getenv("EMBED_MODEL")) EMB_DIM = config.hidden_size CREATE_INDEX = int(os.getenv("CREATE_INDEX")) BATCH_SIZE = int(os.getenv("BATCH_SIZE")) NUM_PARTITIONS = int(os.getenv("NUM_PARTITIONS")) NUM_SUB_VECTORS = int(os.getenv("NUM_SUB_VECTORS")) HEADERS = { "Content-Type": "application/json" } def embed_and_index(): files = Path(DIRPATH).rglob("*") texts = [] for file in files: if file.is_file(): try: text = file.open().read() if text: texts.append(text) except (OSError, UnicodeDecodeError) as e: logger.error("Error reading file: ", e) except Exception as e: logger.error("Unhandled exception: ", e) raise logger.info(f"Successfully read {len(texts)} files") db = lancedb.connect("/usr/src/.lancedb") schema = pa.schema( [ pa.field("vector", pa.list_(pa.float32(), EMB_DIM)), pa.field("text", pa.string()), ] ) tbl = db.create_table(TABLE_NAME, schema=schema, mode="overwrite") start = time.time() for i in tqdm(range(int(np.ceil(len(texts) / BATCH_SIZE)))): payload = { "inputs": texts[i * BATCH_SIZE:(i + 1) * BATCH_SIZE], "truncate": True } resp = requests.post(TEI_URL, json=payload, headers=HEADERS) if resp.status_code != 200: raise RuntimeError(resp.text) vectors = resp.json() data = [ {"vector": vec, "text": text} for vec, text in zip(vectors, texts[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]) ] tbl.add(data=data) logger.info(f"Embedding and ingestion of {len(texts)} items took {time.time() - start}") # IVF-PQ indexing if CREATE_INDEX: tbl.create_index(num_partitions=NUM_PARTITIONS, num_sub_vectors=NUM_SUB_VECTORS) if __name__ == "__main__": embed_and_index()
[ "lancedb.connect" ]
[((194, 233), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (213, 233), False, 'import logging\n'), ((243, 270), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'import logging\n'), ((281, 303), 'os.getenv', 'os.getenv', (['"""EMBED_URL"""'], {}), "('EMBED_URL')\n", (290, 303), False, 'import os\n'), ((401, 425), 'os.getenv', 'os.getenv', (['"""EMBED_MODEL"""'], {}), "('EMBED_MODEL')\n", (410, 425), False, 'import os\n'), ((475, 500), 'os.getenv', 'os.getenv', (['"""CREATE_INDEX"""'], {}), "('CREATE_INDEX')\n", (484, 500), False, 'import os\n'), ((519, 542), 'os.getenv', 'os.getenv', (['"""BATCH_SIZE"""'], {}), "('BATCH_SIZE')\n", (528, 542), False, 'import os\n'), ((565, 592), 'os.getenv', 'os.getenv', (['"""NUM_PARTITIONS"""'], {}), "('NUM_PARTITIONS')\n", (574, 592), False, 'import os\n'), ((616, 644), 'os.getenv', 'os.getenv', (['"""NUM_SUB_VECTORS"""'], {}), "('NUM_SUB_VECTORS')\n", (625, 644), False, 'import os\n'), ((1244, 1280), 'lancedb.connect', 'lancedb.connect', (['"""/usr/src/.lancedb"""'], {}), "('/usr/src/.lancedb')\n", (1259, 1280), False, 'import lancedb\n'), ((1523, 1534), 'time.time', 'time.time', ([], {}), '()\n', (1532, 1534), False, 'import time\n'), ((1746, 1799), 'requests.post', 'requests.post', (['TEI_URL'], {'json': 'payload', 'headers': 'HEADERS'}), '(TEI_URL, json=payload, headers=HEADERS)\n', (1759, 1799), False, 'import requests\n'), ((737, 750), 'pathlib.Path', 'Path', (['DIRPATH'], {}), '(DIRPATH)\n', (741, 750), False, 'from pathlib import Path\n'), ((1409, 1420), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (1418, 1420), True, 'import pyarrow as pa\n'), ((1355, 1367), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1365, 1367), True, 'import pyarrow as pa\n'), ((2166, 2177), 'time.time', 'time.time', ([], {}), '()\n', (2175, 2177), False, 'import time\n')]
import lancedb from datasets import Dataset from homematch.config import DATA_DIR, TABLE_NAME from homematch.data.types import ImageData def datagen() -> list[ImageData]: dataset = Dataset.load_from_disk(DATA_DIR / "properties_dataset") # return Image instances return [ImageData(**batch) for batch in dataset] def main() -> None: uri = str(DATA_DIR) + "/.lancedb/" db = lancedb.connect(uri) table = db.create_table(TABLE_NAME, schema=ImageData, exist_ok=True) data = datagen() table.add(data) if __name__ == "__main__": main()
[ "lancedb.connect" ]
[((188, 243), 'datasets.Dataset.load_from_disk', 'Dataset.load_from_disk', (["(DATA_DIR / 'properties_dataset')"], {}), "(DATA_DIR / 'properties_dataset')\n", (210, 243), False, 'from datasets import Dataset\n'), ((397, 417), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (412, 417), False, 'import lancedb\n'), ((286, 304), 'homematch.data.types.ImageData', 'ImageData', ([], {}), '(**batch)\n', (295, 304), False, 'from homematch.data.types import ImageData\n')]
import openai import os import lancedb import pickle import requests from pathlib import Path from bs4 import BeautifulSoup import re from langchain.document_loaders import UnstructuredHTMLLoader from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.vectorstores import LanceDB from langchain.llms import OpenAI from langchain.chains import RetrievalQA # Function to fetch and save a page as an HTML file def save_page(url, save_dir): response = requests.get(url) if response.status_code == 200: soup = BeautifulSoup(response.content, 'html.parser') title = soup.find('title').text filename = f"{title}.html" with open(os.path.join(save_dir, filename), 'w', encoding='utf-8') as file: file.write(str(soup)) def get_document_title(document): m = str(document.metadata["source"]) title = re.findall("(.*)\.html", m) print("PRINTING TITLES") print(title) if title[0] is not None: return(title[0]) return '' # if "OPENAI_API_KEY" not in os.environ: openai.api_key = "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" assert len(openai.Model.list()["data"]) > 0 print("fetching data") # Base URL of Wikivoyage base_url = "https://en.wikivoyage.org/wiki/" # List of page titles to download page_titles = ["London", "Paris", "New_York_City"] # Add more as needed # Directory to save the HTML files save_directory = "./wikivoyage_pages" # Create the save directory if it doesn't exist if not os.path.exists(save_directory): os.makedirs(save_directory) # Loop through the page titles and download the pages for title in page_titles: url = f"{base_url}{title}" save_page(url, save_directory) docs_path = Path("cities.pkl") docs = [] if not docs_path.exists(): for p in Path("./wikivoyage_pages").rglob("*.html"): if p.is_dir(): continue loader = UnstructuredHTMLLoader(p) raw_document = loader.load() m = {} m["title"] = get_document_title(raw_document[0]) raw_document[0].metadata = raw_document[0].metadata | m raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"]) docs = docs + raw_document with docs_path.open("wb") as fh: pickle.dump(docs, fh) else: with docs_path.open("rb") as fh: docs = pickle.load(fh) #split text text_splitter = RecursiveCharacterTextSplitter( chunk_size=500, chunk_overlap=50, ) documents = text_splitter.split_documents(docs) embeddings = OpenAIEmbeddings(openai_api_key="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") db = lancedb.connect('/tmp/lancedb') table = db.create_table("city_docs", data=[ {"vector": embeddings.embed_query("Hello World"), "text": "Hello World"} ], mode="overwrite") print("generated embeddings!") docsearch = LanceDB.from_documents(documents[5:], embeddings, connection=table) qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key="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"), chain_type="stuff", retriever=docsearch.as_retriever()) query_file = open('query.pkl', 'wb') pickle.dump(qa, query_file) query_file.close() print("returning query object")
[ "lancedb.connect" ]
[((1848, 1866), 'pathlib.Path', 'Path', (['"""cities.pkl"""'], {}), "('cities.pkl')\n", (1852, 1866), False, 'from pathlib import Path\n'), ((2545, 2609), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(50)'}), '(chunk_size=500, chunk_overlap=50)\n', (2575, 2609), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2691, 2782), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': '"""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"""'}), "(openai_api_key=\n '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')\n", (2707, 2782), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2786, 2817), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (2801, 2817), False, 'import lancedb\n'), ((3012, 3079), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents[5:]', 'embeddings'], {'connection': 'table'}), '(documents[5:], embeddings, connection=table)\n', (3034, 3079), False, 'from langchain.vectorstores import LanceDB\n'), ((3293, 3320), 'pickle.dump', 'pickle.dump', (['qa', 'query_file'], {}), '(qa, query_file)\n', (3304, 3320), False, 'import pickle\n'), ((548, 565), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (560, 565), False, 'import requests\n'), ((959, 987), 're.findall', 're.findall', (['"""(.*)\\\\.html"""', 'm'], {}), "('(.*)\\\\.html', m)\n", (969, 987), False, 'import re\n'), ((1616, 1646), 'os.path.exists', 'os.path.exists', (['save_directory'], {}), '(save_directory)\n', (1630, 1646), False, 'import os\n'), ((1653, 1680), 'os.makedirs', 'os.makedirs', (['save_directory'], {}), '(save_directory)\n', (1664, 1680), False, 'import os\n'), ((619, 665), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (632, 665), False, 'from bs4 import BeautifulSoup\n'), ((2030, 2055), 'langchain.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['p'], {}), '(p)\n', (2052, 2055), False, 'from langchain.document_loaders import UnstructuredHTMLLoader\n'), ((2414, 2435), 'pickle.dump', 'pickle.dump', (['docs', 'fh'], {}), '(docs, fh)\n', (2425, 2435), False, 'import pickle\n'), ((2497, 2512), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2508, 2512), False, 'import pickle\n'), ((3118, 3194), 'langchain.llms.OpenAI', 'OpenAI', ([], {'openai_api_key': '"""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"""'}), "(openai_api_key='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')\n", (3124, 3194), False, 'from langchain.llms import OpenAI\n'), ((1238, 1257), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (1255, 1257), False, 'import openai\n'), ((1922, 1948), 'pathlib.Path', 'Path', (['"""./wikivoyage_pages"""'], {}), "('./wikivoyage_pages')\n", (1926, 1948), False, 'from pathlib import Path\n'), ((762, 794), 'os.path.join', 'os.path.join', (['save_dir', 'filename'], {}), '(save_dir, filename)\n', (774, 794), False, 'import os\n')]
import queue import threading from dataclasses import dataclass import lancedb import pyarrow as pa import numpy as np import torch import torch.nn.functional as F from safetensors import safe_open from tqdm import tqdm from .app.schemas.task import TaskCompletion from .ops.object_detectors import YOLOV8TRTEngine from .ops.ocr import PaddlePaddleOCRV4TRTEngine from .ops.optical_flow_estimators import RAFT from .ops.video_decoders import VPFVideoDecoder from .ops.clip import ClipVisionEncoder, ClipVisionEncoderTRTEngine from .utils.cvt import rgb_to_hsv_nhwc_uint8 @dataclass class FrameQueueItem: type: str task_id: str video_path: str result_queue: queue.Queue frames: torch.Tensor | None = None frame_idx: int | None = None fps: float | None = None total_frames: int | None = None width: int | None = None height: int | None = None @dataclass class PostProcessQueueItem: type: str task_id: str video_path: str batch_idx: int shape: tuple[int, int] preds: torch.Tensor result_queue: queue.Queue class Saver: def __init__(self, output_path: str): self.output_path = output_path self.db = lancedb.connect(output_path) table_names = self.db.table_names() if "clip" in table_names: self.clip_table = self.db.open_table("clip") else: schema = pa.schema([ pa.field("video_id", pa.utf8()), pa.field("frame_idx", pa.int32()), pa.field("clip_feature", pa.list_(pa.float32(), list_size=768)), ]) self.clip_table = self.db.create_table("clip", schema=schema) if "optical_flow" in table_names: self.optical_flow_table = self.db.open_table("optical_flow") else: schema = pa.schema([ pa.field("video_id", pa.utf8()), pa.field("frame_idx", pa.int32()), pa.field("optical_flow_score", pa.float32()), ]) self.optical_flow_table = self.db.create_table("optical_flow", schema=schema) if "cut_scores" in table_names: self.cut_scores_table = self.db.open_table("cut_scores") else: schema = pa.schema([ pa.field("video_id", pa.utf8()), pa.field("fps", pa.float32()), pa.field("cut_scores", pa.list_(pa.float32())), ]) self.cut_scores_table = self.db.create_table("cut_scores", schema=schema) if "ocr" in table_names: self.ocr_table = self.db.open_table("ocr") else: schema = pa.schema([ pa.field("video_id", pa.utf8()), pa.field("frame_idx", pa.int32()), pa.field("ocr_score", pa.float32()), pa.field("boxes", pa.list_(pa.list_(pa.list_(pa.int32, list_size=2), list_size=4))), pa.field("scores", pa.list_(pa.float32())), ]) self.ocr_table = self.db.create_table("ocr", schema=schema) def put_clip_features(self, clip_features: torch.Tensor): ... def put_optical_flow(self, optical_flow: torch.Tensor): ... def put_cut_scores(self, cut_scores: torch.Tensor): ... def put_ocr_results(self, ocr_score: float, boxes, scores): ... def put_det_results(self): ... 
class Pipeline: def __init__( self, batch_size: int, device_id: int = 0, raft_model_path: str = "./weights/raft_things.safetensors", ocr_model_path: str = "./weights/pp-ocr-v4-det-fp16.engine", det_model_path: str = "./weights/yolov8m-fp16.engine", ): self.batch_size = batch_size self.device_id = device_id device_id = 0 self.device_str = f"cuda:{device_id}" self.device = torch.device(self.device_str) self.clip_encoder = ClipVisionEncoder( model_name="ViT-L/14", device=self.device, ) self.clip_encoder = ClipVisionEncoderTRTEngine( "./weights/clip_vit_l14-fp16.engine", self.device, ) state_dict = {} with safe_open(raft_model_path, framework="pt", device=self.device_str) as f: for key in f.keys(): state_dict[key] = f.get_tensor(key) raft = RAFT() raft.load_state_dict(state_dict) raft.eval() raft.to(self.device) self.raft = raft self.pp_ocr = PaddlePaddleOCRV4TRTEngine(ocr_model_path, self.device) self.yolo_v8 = YOLOV8TRTEngine(det_model_path, self.device) self.frame_queue: queue.Queue[FrameQueueItem | None] = queue.Queue(maxsize=64) self.post_process_queue: queue.Queue[PostProcessQueueItem | None] = queue.Queue(maxsize=64) self.process_thread = threading.Thread(target=self.process_thread_fn) self.post_process_thread = threading.Thread(target=self.post_process_thread_fn) def compute_cut_scores( self, frames: torch.Tensor, last_hsv_frame: torch.Tensor | None, last_hsv_frame_2fps: torch.Tensor | None, last_hsv_frame_8fps: torch.Tensor | None, start_2fps: float, start_8fps: float, stride_2fps: float, stride_8fps: float, ): hsv_frames = rgb_to_hsv_nhwc_uint8(frames) cur = start_2fps indices_2fps = [] while round(cur) < frames.shape[0]: indices_2fps.append(round(cur)) cur += stride_2fps start_2fps = cur - frames.shape[0] indices_2fps_tensor = torch.as_tensor(indices_2fps, dtype=torch.int64, device=frames.device) cur = start_8fps indices_8fps = [] while round(cur) < frames.shape[0]: indices_8fps.append(round(cur)) cur += stride_8fps start_8fps = cur - frames.shape[0] indices_8fps_tensor = torch.as_tensor(indices_8fps, dtype=torch.int64, device=frames.device) hsv_frames_2fps = hsv_frames[indices_2fps_tensor] hsv_frames_8fps = hsv_frames[indices_8fps_tensor] if last_hsv_frame is None: diff = (hsv_frames[:-1] - hsv_frames[1:]).abs().to(torch.float32) else: prev_hsv_frames = torch.cat([last_hsv_frame[None], hsv_frames[:-1]], dim=0) diff = (prev_hsv_frames - hsv_frames).abs().to(torch.float32) if hsv_frames_2fps.shape[0] > 0: if last_hsv_frame_2fps is None: diff_2fps = (hsv_frames_2fps[:-1] - hsv_frames_2fps[1:]).abs().to(torch.float32) else: prev_hsv_frames_2fps = torch.cat([ last_hsv_frame_2fps[None], hsv_frames_2fps[:-1] ], dim=0) diff_2fps = (prev_hsv_frames_2fps - hsv_frames_2fps).abs().to(torch.float32) if hsv_frames_8fps.shape[0] > 0: if last_hsv_frame_8fps is None: diff_8fps = (hsv_frames_8fps[:-1] - hsv_frames_8fps[1:]).abs().to(torch.float32) else: prev_hsv_frames_8fps = torch.cat([ last_hsv_frame_8fps[None], hsv_frames_8fps[:-1] ], dim=0) diff_8fps = (prev_hsv_frames_8fps - hsv_frames_8fps).abs().to(torch.float32) last_hsv_frame = hsv_frames[-1] cut_scores = diff.flatten(1, 2).mean(dim=1) if hsv_frames_2fps.shape[0] > 0: cut_scores_2fps = diff_2fps.flatten(1, 2).mean(dim=1) last_hsv_frame_2fps = hsv_frames_2fps[-1] else: cut_scores_2fps = [] if hsv_frames_8fps.shape[0] > 0: cut_scores_8fps = diff_8fps.flatten(1, 2).mean(dim=1) last_hsv_frame_8fps = hsv_frames_8fps[-1] else: cut_scores_8fps = [] return ( cut_scores, cut_scores_2fps, cut_scores_8fps, last_hsv_frame, last_hsv_frame_2fps, 
last_hsv_frame_8fps, start_2fps, start_8fps, indices_2fps_tensor, indices_2fps, indices_8fps, ) def apply_resize(self, images: torch.Tensor, size: int) -> torch.Tensor: height, width = images.shape[-2:] if height < width: resize_to = (size, round(width * size / height)) else: resize_to = (round(height * size / width), size) return F.interpolate(images, size=resize_to, mode="bicubic") def apply_center_crop(self, images: torch.Tensor, factor: int = 32) -> torch.Tensor: height, width = images.shape[-2:] new_height = height // factor * factor new_width = width // factor * factor if new_height != height or new_width != width: start_h = (height - new_height) // 2 end_h = start_h + new_height start_w = (width - new_width) // 2 end_w = start_w + new_width images = images[..., start_h:end_h, start_w:end_w] return images @torch.no_grad() def process_thread_fn(self): while True: try: item = self.frame_queue.get(timeout=1) if item is None: break except queue.Empty: continue try: if item.type == "start": task_id = item.task_id video_path = item.video_path fps = item.fps stride_2fps = fps / 2.0 stride_8fps = fps / 8.0 frames_2fps_det_list = [] total_frames_2fps = 0 last_hsv_frame = None last_hsv_frame_2fps = None last_hsv_frame_8fps = None start_2fps = 0 start_8fps = 0 last_frame_2fps = None batch_idx_det = 0 batch_idx_flow = 0 results = [] elif item.type == "frames": frames = item.frames result_queue = item.result_queue ( cut_scores, cut_scores_2fps, cut_scores_8fps, last_hsv_frame, last_hsv_frame_2fps, last_hsv_frame_8fps, start_2fps, start_8fps, indices_2fps_tensor, indices_2fps, indices_8fps, ) = self.compute_cut_scores( frames, last_hsv_frame, last_hsv_frame_2fps, last_hsv_frame_8fps, start_2fps, start_8fps, stride_2fps, stride_8fps, ) results.append({ "type": "cut_scores", "task_id": task_id, "video_path": video_path, "frame_idx": item.frame_idx, "cut_scores": cut_scores, "cut_scores_2fps": cut_scores_2fps, "cut_scores_8fps": cut_scores_8fps, "indices_2fps": indices_2fps, "indices_8fps": indices_8fps, }) # -> b, 3, h, w frames = frames.permute(0, 3, 1, 2).float() frames.div_(255.0) frames.clamp_(0.0, 1.0) frames_2fps = frames[indices_2fps_tensor] if frames_2fps.shape[0] > 0: frames_2fps_resized_clip = self.apply_resize(frames_2fps, self.clip_encoder.input_res) height, width = frames.shape[-2:] frames_2fps_resized_det = self.apply_resize(frames_2fps, min(height, width) // 2) # clip clip_features = self.clip_encoder.encode(frames_2fps_resized_clip) clip_features = clip_features results.append({ "type": "clip", "task_id": task_id, "video_path": video_path, "frame_idx": item.frame_idx, "clip_features": clip_features, }) # center crop for det frames_2fps_det = self.apply_center_crop(frames_2fps_resized_det, factor=32) frames_2fps_det_list.append(frames_2fps_det) total_frames_2fps += frames_2fps_det.shape[0] # optical flow if total_frames_2fps >= 64: frames_2fps_det = torch.cat(frames_2fps_det_list, dim=0) total_frames = frames_2fps_det.shape[0] pp_ocr_preds = self.pp_ocr.detect(frames_2fps_det) self.post_process_queue.put( PostProcessQueueItem( type="pp_orc", task_id=task_id, video_path=video_path, batch_idx=batch_idx_det, shape=tuple(frames_2fps_det.shape[-2:]), preds=pp_ocr_preds, result_queue=result_queue, ) ) yolo_v8_preds = self.yolo_v8.detect(frames_2fps_det) self.post_process_queue.put( PostProcessQueueItem( type="yolo_v8", task_id=task_id, video_path=video_path, batch_idx=batch_idx_det, shape=tuple(frames_2fps_det.shape[-2:]), preds=yolo_v8_preds, result_queue=result_queue, ) ) batch_idx_det += 
1 if last_frame_2fps is not None: frames_2fps_det = torch.cat([last_frame_2fps[None], frames_2fps_det], dim=0) offset = 1 else: offset = 0 frames_2fps_flow = frames_2fps_det * 2 - 1 batch_size = 32 for i in range(0 + offset, total_frames + offset, batch_size): if i + batch_size > total_frames + offset: break start = max(i - 1, 0) end = min(i + batch_size, total_frames) frames1 = frames_2fps_flow[start:end - 1] frames2 = frames_2fps_flow[start + 1:end] flows = self.raft(frames1, frames2, update_iters=12) mag = torch.sqrt(flows[:, 0, ...] ** 2 + flows[:, 1, ...] ** 2) optical_flow_scores = mag.flatten(1).mean(dim=1) results.append({ "type": "optical_flow", "task_id": task_id, "video_path": video_path, "batch_idx": batch_idx_flow, "optical_flow_scores": optical_flow_scores, }) batch_idx_flow += 1 last_frame_2fps = frames_2fps_det[-1] frames_2fps_det_list = [frames_2fps_det[i:]] total_frames_2fps = frames_2fps_det_list[-1].shape[0] elif item.type == "end": # optical flow if total_frames_2fps > 0: frames_2fps_det = torch.cat(frames_2fps_det_list, dim=0) total_frames = frames_2fps_det.shape[0] pp_ocr_preds = self.pp_ocr.detect(frames_2fps_det) self.post_process_queue.put( PostProcessQueueItem( type="pp_orc", task_id=task_id, video_path=video_path, batch_idx=batch_idx_det, shape=tuple(frames_2fps_det.shape[-2:]), preds=pp_ocr_preds, result_queue=result_queue, ) ) yolo_v8_preds = self.yolo_v8.detect(frames_2fps_det) self.post_process_queue.put( PostProcessQueueItem( type="yolo_v8", task_id=task_id, video_path=video_path, batch_idx=batch_idx_det, shape=tuple(frames_2fps_det.shape[-2:]), preds=yolo_v8_preds, result_queue=result_queue, ) ) batch_idx_det += 1 if last_frame_2fps is not None: frames_2fps_det = torch.cat([last_frame_2fps[None], frames_2fps_det], dim=0) offset = 1 else: offset = 0 frames_2fps_flow = frames_2fps_det * 2 - 1 batch_size = 32 if frames_2fps_det.shape[0] > 1: for i in range(0 + offset, total_frames + offset, batch_size): start = max(i - 1, 0) end = min(i + batch_size, total_frames) frames1 = frames_2fps_flow[start:end - 1] frames2 = frames_2fps_flow[start + 1:end] if frames1.shape[0] > 0 and frames2.shape[0] > 0: flows = self.raft(frames1, frames2, update_iters=12) mag = torch.sqrt(flows[:, 0, ...] ** 2 + flows[:, 1, ...] 
** 2) optical_flow_scores = mag.flatten(1).mean(dim=1) results.append({ "type": "optical_flow", "task_id": task_id, "video_path": video_path, "batch_idx": batch_idx_flow, "optical_flow_scores": optical_flow_scores, }) batch_idx_flow += 1 last_frame_2fps = None frames_2fps_det_list = [] total_frames_2fps = 0 for res in results: new_res = {} for key, val in res.items(): if isinstance(val, torch.Tensor): new_res[key] = val.cpu().tolist() else: new_res[key] = val result_queue.put(new_res) torch.cuda.empty_cache() item.result_queue.put( TaskCompletion( id=task_id, status="completed", fps=item.fps, total_frames=item.total_frames, width=item.width, height=item.height, ) ) else: raise ValueError(f"unknown item type: {item.type}") except Exception as e: import io import traceback str_io = io.StringIO() traceback.print_exc(file=str_io) item.result_queue.put( TaskCompletion( id=task_id, status="failed", message=str_io.getvalue() + f"\nitem: {item}" + f"\n{frames_2fps_det.shape}", ) ) print("process_thread_fn stopped.") def post_process_thread_fn(self): while True: try: item = self.post_process_queue.get(timeout=1) if item is None: break except queue.Empty: continue if item.type == "yolo_v8": results = self.yolo_v8.post_process(item.preds.cpu().numpy()) item.result_queue.put({ "type": "det", "detector": "yolo_v8", "task_id": item.task_id, "batch_idx": item.batch_idx, "shape": item.shape, "results": results, }) elif item.type == "pp_orc": boxes, scores, ocr_scores = self.pp_ocr.post_process(item.preds.cpu().numpy()) item.result_queue.put({ "type": "ocr", "detector": "pp_orc", "task_id": item.task_id, "batch_idx": item.batch_idx, "shape": item.shape, "boxes": boxes, "scores": scores, "ocr_scores": ocr_scores, }) else: raise ValueError(f"unknown item type: {item.type}") print("post_process_thread_fn stopped.") def start(self): self.process_thread.start() self.post_process_thread.start() def close(self): self.frame_queue.put(None) self.post_process_queue.put(None) self.process_thread.join() self.post_process_thread.join() @torch.no_grad() def __call__( self, task_id: str, video_path: str, result_queue: queue.Queue, verbose: bool = False, ): print("video_path", video_path) decoder = VPFVideoDecoder( video_path=video_path, batch_size=self.batch_size, device_id=self.device_id, ) if decoder.width != 1280 or decoder.height != 720: result_queue.put( TaskCompletion( id=task_id, status="failed", message=( "video resolution is not 720x1280 " f"({decoder.height}x{decoder.width})." ), ) ) return self.frame_queue.put( FrameQueueItem( type="start", task_id=task_id, video_path=video_path, fps=decoder.fps, result_queue=result_queue, ), ) frame_idx = 0 for frames in tqdm(decoder.iter_frames(pixel_format="rgb"), disable=not verbose): self.frame_queue.put( FrameQueueItem( type="frames", task_id=task_id, video_path=video_path, frames=frames, frame_idx=frame_idx, result_queue=result_queue, ), ) frame_idx += frames.shape[0] self.frame_queue.put( FrameQueueItem( type="end", task_id=task_id, video_path=video_path, fps=decoder.fps, total_frames=decoder.total_frames, width=decoder.width, height=decoder.height, result_queue=result_queue, ) )
[ "lancedb.connect" ]
[((8969, 8984), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8982, 8984), False, 'import torch\n'), ((22228, 22243), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22241, 22243), False, 'import torch\n'), ((1191, 1219), 'lancedb.connect', 'lancedb.connect', (['output_path'], {}), '(output_path)\n', (1206, 1219), False, 'import lancedb\n'), ((3869, 3898), 'torch.device', 'torch.device', (['self.device_str'], {}), '(self.device_str)\n', (3881, 3898), False, 'import torch\n'), ((4711, 4734), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(64)'}), '(maxsize=64)\n', (4722, 4734), False, 'import queue\n'), ((4811, 4834), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(64)'}), '(maxsize=64)\n', (4822, 4834), False, 'import queue\n'), ((4866, 4913), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.process_thread_fn'}), '(target=self.process_thread_fn)\n', (4882, 4913), False, 'import threading\n'), ((4949, 5001), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.post_process_thread_fn'}), '(target=self.post_process_thread_fn)\n', (4965, 5001), False, 'import threading\n'), ((5632, 5702), 'torch.as_tensor', 'torch.as_tensor', (['indices_2fps'], {'dtype': 'torch.int64', 'device': 'frames.device'}), '(indices_2fps, dtype=torch.int64, device=frames.device)\n', (5647, 5702), False, 'import torch\n'), ((5947, 6017), 'torch.as_tensor', 'torch.as_tensor', (['indices_8fps'], {'dtype': 'torch.int64', 'device': 'frames.device'}), '(indices_8fps, dtype=torch.int64, device=frames.device)\n', (5962, 6017), False, 'import torch\n'), ((8366, 8419), 'torch.nn.functional.interpolate', 'F.interpolate', (['images'], {'size': 'resize_to', 'mode': '"""bicubic"""'}), "(images, size=resize_to, mode='bicubic')\n", (8379, 8419), True, 'import torch.nn.functional as F\n'), ((4204, 4270), 'safetensors.safe_open', 'safe_open', (['raft_model_path'], {'framework': '"""pt"""', 'device': 'self.device_str'}), "(raft_model_path, framework='pt', device=self.device_str)\n", (4213, 4270), False, 'from safetensors import safe_open\n'), ((6293, 6350), 'torch.cat', 'torch.cat', (['[last_hsv_frame[None], hsv_frames[:-1]]'], {'dim': '(0)'}), '([last_hsv_frame[None], hsv_frames[:-1]], dim=0)\n', (6302, 6350), False, 'import torch\n'), ((6665, 6732), 'torch.cat', 'torch.cat', (['[last_hsv_frame_2fps[None], hsv_frames_2fps[:-1]]'], {'dim': '(0)'}), '([last_hsv_frame_2fps[None], hsv_frames_2fps[:-1]], dim=0)\n', (6674, 6732), False, 'import torch\n'), ((7104, 7171), 'torch.cat', 'torch.cat', (['[last_hsv_frame_8fps[None], hsv_frames_8fps[:-1]]'], {'dim': '(0)'}), '([last_hsv_frame_8fps[None], hsv_frames_8fps[:-1]], dim=0)\n', (7113, 7171), False, 'import torch\n'), ((20200, 20213), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (20211, 20213), False, 'import io\n'), ((20230, 20262), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'str_io'}), '(file=str_io)\n', (20249, 20262), False, 'import traceback\n'), ((1441, 1450), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (1448, 1450), True, 'import pyarrow as pa\n'), ((1491, 1501), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (1499, 1501), True, 'import pyarrow as pa\n'), ((1874, 1883), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (1881, 1883), True, 'import pyarrow as pa\n'), ((1924, 1934), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (1932, 1934), True, 'import pyarrow as pa\n'), ((1984, 1996), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1994, 1996), True, 'import pyarrow as pa\n'), ((2298, 2307), 'pyarrow.utf8', 'pa.utf8', ([], {}), 
'()\n', (2305, 2307), True, 'import pyarrow as pa\n'), ((2342, 2354), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2352, 2354), True, 'import pyarrow as pa\n'), ((2695, 2704), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2702, 2704), True, 'import pyarrow as pa\n'), ((2745, 2755), 'pyarrow.int32', 'pa.int32', ([], {}), '()\n', (2753, 2755), True, 'import pyarrow as pa\n'), ((2796, 2808), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2806, 2808), True, 'import pyarrow as pa\n'), ((1554, 1566), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (1564, 1566), True, 'import pyarrow as pa\n'), ((2405, 2417), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2415, 2417), True, 'import pyarrow as pa\n'), ((2956, 2968), 'pyarrow.float32', 'pa.float32', ([], {}), '()\n', (2966, 2968), True, 'import pyarrow as pa\n'), ((12642, 12680), 'torch.cat', 'torch.cat', (['frames_2fps_det_list'], {'dim': '(0)'}), '(frames_2fps_det_list, dim=0)\n', (12651, 12680), False, 'import torch\n'), ((19547, 19571), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (19569, 19571), False, 'import torch\n'), ((2863, 2894), 'pyarrow.list_', 'pa.list_', (['pa.int32'], {'list_size': '(2)'}), '(pa.int32, list_size=2)\n', (2871, 2894), True, 'import pyarrow as pa\n'), ((14150, 14208), 'torch.cat', 'torch.cat', (['[last_frame_2fps[None], frames_2fps_det]'], {'dim': '(0)'}), '([last_frame_2fps[None], frames_2fps_det], dim=0)\n', (14159, 14208), False, 'import torch\n'), ((14998, 15055), 'torch.sqrt', 'torch.sqrt', (['(flows[:, 0, ...] ** 2 + flows[:, 1, ...] ** 2)'], {}), '(flows[:, 0, ...] ** 2 + flows[:, 1, ...] ** 2)\n', (15008, 15055), False, 'import torch\n'), ((15935, 15973), 'torch.cat', 'torch.cat', (['frames_2fps_det_list'], {'dim': '(0)'}), '(frames_2fps_det_list, dim=0)\n', (15944, 15973), False, 'import torch\n'), ((17443, 17501), 'torch.cat', 'torch.cat', (['[last_frame_2fps[None], frames_2fps_det]'], {'dim': '(0)'}), '([last_frame_2fps[None], frames_2fps_det], dim=0)\n', (17452, 17501), False, 'import torch\n'), ((18356, 18413), 'torch.sqrt', 'torch.sqrt', (['(flows[:, 0, ...] ** 2 + flows[:, 1, ...] ** 2)'], {}), '(flows[:, 0, ...] ** 2 + flows[:, 1, ...] ** 2)\n', (18366, 18413), False, 'import torch\n')]
import os import openai import json import numpy as np from numpy.linalg import norm import re from time import time, sleep from uuid import uuid4 import datetime import lancedb import pandas as pd def open_file(filepath): with open(filepath, 'r', encoding='utf-8') as infile: return infile.read() def save_file(filepath, content): with open(filepath, 'w', encoding='utf-8') as outfile: outfile.write(content) def timestamp_to_datetime(unix_time): return datetime.datetime.fromtimestamp(unix_time).strftime("%A, %B %d, %Y at %I:%M%p %Z") def gpt3_embedding(content, engine='text-embedding-ada-002'): content = content.encode(encoding='ASCII', errors='ignore').decode() response = openai.Embedding.create(input=content, engine=engine) vector = response['data'][0]['embedding'] return vector def ai_completion(prompt, engine='gpt-3.5-turbo', temp=0.0, top_p=1.0, tokens=400, freq_pen=0.0, pres_pen=0.0, stop=['USER:', 'RAVEN:']): max_retry = 5 retry = 0 prompt = prompt.encode(encoding='ASCII',errors='ignore' ).decode() while True: try: response = openai.Completion.createChatCompletion( model=engine, prompt=prompt, temperature=temp, max_tokens=tokens, top_p=top_p, frequency_penalty=freq_pen, presence_penalty=pres_pen, stop=stop) text = response[ 'choices' ][0][ 'text' ].strip() text = re.sub( '[\r\n]+', '\n', text ) text = re.sub( '[\t ]+', ' ', text ) filename = '%s_gpt3.txt' % time() if not os.path.exists( 'gpt3_logs' ): os.makedirs( 'gpt3_logs' ) save_file( 'gpt3_logs/%s' % filename, prompt + '\n\n==========\n\n' + text ) return text except Exception as oops: retry += 1 if retry >= max_retry: return "GPT3 error: %s" % oops print( 'Error communicating with OpenAI:', oops ) sleep(1) initialization_data = { 'unique_id': '2c9a93d5-3631-4faa-8eac-a99b92e45d50', 'vector': [-0.07254597, -0.00345811, 0.038447 , 0.025837 , -0.01153462, 0.05443505, 0.04415885, -0.03636164, 0.04025393, 0.07552634, 0.05359982, 0.00822271, -0.01921194, 0.09719925, -0.05354664, 0.06897003, 0.01113722, 0.06425729, 0.04223888, -0.05898998, -0.01620383, 0.01389384, 0.02873985, -0.00392985, -0.02874645, 0.02680893, -0.01051578, -0.0792539 , -0.03293172, -0.00302758, -0.03745122, -0.02573149, -0.00473748, -0.04199643, -0.03275133, 0.00779039, 0.00624639, 0.06108246, -0.03870484, 0.06269313, -0.06609031, -0.01554973, -0.04453023, -0.00073963, 0.01021871, -0.02984073, 0.00474442, 0.00195324, -0.02518238, -0.00426692, 0.00750736, 0.10541135, 0.08878568, 0.05580394, -0.01232905, -0.04016594, 0.04829635, -0.05689557, -0.01863352, 0.03308525, 0.06468356, -0.03367596, 0.03575945, -0.02212196, -0.01714826, -0.00585904, -0.09612011, -0.00102483, 0.06920582, 0.05855923, -0.04266937, -0.03763324, -0.02187943, -0.00141346, -0.086646 , 0.02106668, 0.00786448, 0.04093482, -0.00187637, 0.02952651, -0.03702659, -0.02844533, 0.00322303, -0.02380866, -0.05954637, 0.07149482, -0.0065098 , 0.06807149, -0.00099369, 0.05040864, 0.04761266, 0.01862198, -0.05431763, 0.00940712, -0.00970824, -0.02216387, 0.024306 , 0.03772607, -0.01540066, 0.03771403, 0.01400787, -0.09354229, -0.06321603, -0.09549774, 0.00895245, -0.01175102, 0.03934404, 0.00956635, -0.04152715, 0.04295438, 0.02825363, 0.02063269, 0.02212336, -0.06888197, 0.01428573, 0.04887657, 0.00304061, 0.03196091, 0.03902192, 0.02360773, -0.02807535, 0.01558309, 0.02165642, 0.01129555, 0.0567826 , -0.00659211, -0.01081236, 0.01809447, 0.00318123, -0.01214105, -0.05691559, -0.01717793, 0.05293235, 0.01663713, 0.04678147, -0.02094 , -0.05482098, 0.05463412, 0.00163532, 0.00956752, -0.03624124, -0.02359207, 
0.01571903, -0.01502842, 0.03324307, 0.01896691, 0.02235259, 0.02551061, -0.02953271, 0.05505196, -0.03115846, -0.01975026, -0.05484571, -0.01757487, -0.01038232, -0.06098176, -0.01663185, -0.06602633, -0.00643233, 0.00167366, -0.04243006, 0.01024193, -0.02288529, -0.06190364, 0.03787598, 0.03914008, -0.04915332, 0.0182827 , 0.0136188 , 0.02917461, 0.03118066, -0.03110682, -0.04193405, -0.01370175, -0.03901035, 0.00850587, 0.01056607, -0.00084098, -0.01737773, 0.00836137, 0.01500763, 0.00917414, -0.07946376, 0.02008886, 0.04600394, 0.01271509, -0.01654603, -0.04405601, 0.01442427, 0.00967625, 0.01212494, 0.01189141, 0.03507042, -0.00291006, 0.04226362, -0.0958102 , 0.04722575, -0.02520623, -0.00780957, -0.01983704, -0.02350736, -0.03137485, 0.00325953, 0.10679087, -0.08251372, 0.02922777, -0.05723861, -0.05683867, -0.04093323, -0.04769454, -0.02704669, -0.04450696, 0.03854201, 0.05599346, -0.07225747, -0.01060745, -0.01285277, -0.02004824, 0.00567907, -0.01130959, 0.03845671, -0.06483931, -0.00013804, 0.00342195, -0.00497795, 0.03194252, 0.06014316, 0.07774884, -0.02778566, -0.06470748, 0.02103901, 0.02202238, 0.02044025, 0.10802107, 0.00356093, -0.01817842, 0.09661267, -0.05937773, -0.08208849, -0.05190327, -0.0302214 , 0.05572621, -0.06395542, -0.03078226, 0.00083952, 0.09572925, -0.04516173, -0.0123177 , 0.09613901, -0.05666108, -0.00537586, 0.04220096, 0.00019196, 0.00295547, -0.07350546, -0.00707971, -0.01553643, -0.05214835, 0.00311794, 0.00742682, -0.02943217, 0.06675503, 0.04113274, -0.0809793 , 0.03398148, 0.01721729, 0.03014007, -0.04178908, 0.01025263, 0.03336379, 0.05700357, 0.10388609, 0.00663307, -0.05146715, -0.02173147, -0.02297893, -0.01923811, 0.03292958, 0.0521661 , 0.03923552, 0.01330443, 0.02524009, 0.06507587, -0.01531762, -0.04601574, 0.0499142 , 0.06374968, 0.06080135, -0.08060206, 0.03382473, -0.03596291, -0.06714796, -0.08815136, 0.02092835, 0.10282409, 0.07779143, -0.01839681, -0.03541641, 0.00666599, 0.0029895 , -0.08307225, -0.06535257, 0.01114002, -0.06142527, -0.01779631, 0.04441926, 0.02008377, 0.03211711, -0.02073815, -0.01346437, 0.02578364, -0.01888524, 0.03310522, -0.02017466, 0.0198052 , -0.01019527, -0.02200533, -0.02650121, -0.02987311, -0.04946938, -0.05915657, -0.0779579 , 0.03368903, 0.01859711, 0.02692219, 0.04209578, -0.01279042, -0.00151735, -0.03290961, 0.00719433, -0.05409581, 0.04818217, -0.00339916, 0.01444317, -0.04898094, -0.02065373, -0.04324449, -0.01409152, -0.02882394, 0.0129813 , -0.03886433, -0.08824961, 0.02457459, -0.03383131, 0.04405662, 0.03947931, 0.02983763, 0.00124698, 0.01098392, 0.05948395, 0.08565806, 0.02848131, -0.00725272, -0.04415287, -0.03293212, -0.01364554, -0.09744117, -0.05662472, 0.03124948, -0.04624591, -0.00605065, -0.06229377, 0.08636316, -0.03645795, 0.08642905, 0.03093746, -0.08031843, 0.01407037, 0.09892832, 0.03219265, 0.02964027, -0.00517425, -0.03442131, -0.01141241, -0.06644958, -0.07285954, 0.00890575, -0.01360151, 0.00057073, -0.08988309, 0.00797763, 0.0176619 , 0.00745209, -0.07096376, 0.07894821, -0.08301938, 0.0990236 , 0.03789177, -0.01905026, 0.0547296 , -0.06224509, 0.01964617, 0.08179896, -0.0852924 , 0.00475453, -0.01451678, 0.03582037, -0.04732088, -0.041508 , 0.05553002, -0.00753875, -0.02849884, 0.04659286, -0.05146529, -0.0661836 , -0.00761966, 0.01581906, 0.02444271, -0.01438573, -0.03466942, -0.06876651, -0.02311521, -0.00312491, 0.03457906, -0.04614082, 0.03010868, 0.0206049 , 0.08378315, -0.03001363, -0.00827654, 0.01580172, -0.04855691, 0.00014473, -0.01702366, 0.06371997, 0.00924862, 
-0.01441237, 0.0184262 , 0.03586025, 0.07453281, -0.01822053, 0.00263505, -0.07093351, -0.02956585, 0.0937797 , -0.03792839, 0.03657963, -0.01717029, 0.0077794 , 0.06886019, 0.04470135, 0.04228634, 0.06212147, -0.05456647, -0.02041842, 0.02251387, 0.06653161, -0.00503211, 0.03463385, -0.02718318, 0.00118317, -0.02953942, -0.04361469, 0.01001209, 0.01472133, -0.07398187, 0.00152049, -0.02058817, -0.03011479, -0.03247686, -0.03999605, 0.00089937, 0.06058171, -0.1016895 , 0.07500667, 0.03293885, -0.05828201, -0.01353116, 0.06867946, -0.03266895, -0.02314214, 0.03284731, 0.02857622, 0.05733896, 0.05395727, 0.06677917, -0.01256167, 0.01832761, 0.01509516, 0.08785269, -0.01094873, -0.09930896, -0.00904166, 0.01920987, 0.01392063, -0.03855692, 0.04157091, -0.05284394, 0.01217607, -0.00495155, -0.02351189, 0.03753581, 0.03075539, 0.0635642 , 0.05873286, 0.00987345, 0.05255824, -0.08698288, 0.10400596, -0.00647114, -0.00831464, 0.0055213 , 0.01613558, -0.10711982, 0.00563591, 0.03591603, 0.00221161, -0.01541905, -0.0879847 , -0.05289326, -0.04107964, -0.04039652], 'speaker': 'USER', 'time': 1695146425.0193892, 'message': 'this is a test.', 'timestring': 'Tuesday, September 19, 2023 at 02:00PM ' } import pyarrow as pa class LanceTable: def __init__(self): # Initialize lancedb self.db = lancedb.connect( "/tmp/fresh-lancedb" ) # self.schema = pa.schema([ # pa.field("unique_id", pa.string()), # pa.field("vector", pa.list_(pa.float32())), # pa.field("speaker", pa.string()), # pa.field("time", pa.float64()), # pa.field("message", pa.string()), # pa.field("timestring", pa.string()), # ]) # Create the table with the defined schema panda_data_frame = pd.DataFrame([ initialization_data ]) table_name = "lance-table" if table_name in self.db.table_names(): print( "table %s already exists" % table_name ) self.db.drop_table(table_name) # Drop the table if it already exists self.table = self.db.create_table( table_name, panda_data_frame ) else: print( "creating table: %s" % table_name ) self.table = self.db.create_table( table_name, panda_data_frame ) # Insert the provided data into the table # self.table_initialized = False # self.table = None print(json.dumps(initialization_data, indent=4)) # Ensure 'embedded_user_input' is a numpy array # embedded_user_input = np.array(initialization_data['vector']) # # Flatten the array # flattened_input = embedded_user_input.flatten().tolist() # initialization_data[ "vector" ] = flattened_input # dataframe = pd.DataFrame([ initialization_data ]) # arrow_table = pa.Table.from_pandas(dataframe, panda_data_frame) # self.table.add( arrow_table ) # self.table.add( dataframe ) def add(self, unique_id_arg, embedded_message, speaker, timestamp, message, timestring ): # Ensure 'embedded_user_input' is a numpy array # embedded_user_input = np.array( embedded_message ) # Flatten the array # flattened_input = embedded_user_input.flatten().tolist() # embedded_user_input = flattened_input # embedded_user_input = np.array(embedded_message['vector']) # Flatten the array # flattened_input = embedded_user_input.flatten().tolist() # embedded_message[ "vector" ] = flattened_input data = { "unique_id": unique_id_arg, "vector": embedded_message, "speaker": speaker, "time": timestamp, "message": message, "timestring": timestring } # print( data ) dataframe = pd.DataFrame([ data ]) # arrow_table = pa.Table.from_pandas(dataframe, panda_data_frame ) # self.table.add( arrow_table ) self.table.add( dataframe ) lanceTable = LanceTable() import tensorflow_hub as hub # Load the Universal Sentence Encoder 
encoder = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4') if __name__ == '__main__': openai.api_key = open_file('/home/adamsl/linuxBash/pinecone_chat_dave_s/key_openai.txt') while True: # user_input = input('\n\nUSER: ') user_input = "hi" timestamp = time() timestring = timestamp_to_datetime(timestamp) unique_id = str(uuid4()) # embedded_user_input = encoder([ user_input ]).numpy() # Convert the text into vector form # embedded_user_input = gpt3_embedding( user_input ) embedded_user_input = lanceTable.table.embedding_functions[ 'vector' ].function.compute_query_embeddings( user_input )[ 0 ] speaker = 'USER' message = user_input # embedded_user_input = np.array( embedded_user_input ) # flattened_input = [float(item) for item in embedded_user_input.flatten().tolist()] # Insert User's input to lancedb lanceTable.add( unique_id, embedded_user_input, speaker, timestamp, message, timestring ) query_builder = lanceTable.LanceVectorQueryBuilder( lanceTable.table, embedded_user_input, 'vector' ) # Search for relevant message unique ids in lancedb # results = lanceTable.table.search( embedded_user_input ).limit( 30 ).to_df() results = query_builder.to_arrow() dataframe = results.to_pandas() print ( dataframe ) chance_to_quit = input( "Press q to quit: " ) if chance_to_quit == "q": break break # print ( results ) # conversation = "\n".join(results['message'].tolist()) # prompt = open_file('prompt_response.txt').replace('<<CONVERSATION>>', conversation).replace('<<MESSAGE>>', user_input) # ai_completion_text = ai_completion(prompt) # timestamp = time() # timestring = timestamp_to_datetime(timestamp) # embedded_ai_completion = gpt3_embedding(ai_completion_text) # unique_id = str(uuid4()) # speaker = 'RAVEN' # thetimestamp = timestamp # message = ai_completion_text # timestring = timestring # Insert AI's response to lancedb # lanceTable.table.add([( unique_id, embedded_ai_completion, speaker, timestamp, timestring )]) # print('\n\nRAVEN: %s' % ai_completion_text)
[ "lancedb.connect" ]
[((12752, 12817), 'tensorflow_hub.load', 'hub.load', (['"""https://tfhub.dev/google/universal-sentence-encoder/4"""'], {}), "('https://tfhub.dev/google/universal-sentence-encoder/4')\n", (12760, 12817), True, 'import tensorflow_hub as hub\n'), ((720, 773), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'content', 'engine': 'engine'}), '(input=content, engine=engine)\n', (743, 773), False, 'import openai\n'), ((9917, 9954), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/fresh-lancedb"""'], {}), "('/tmp/fresh-lancedb')\n", (9932, 9954), False, 'import lancedb\n'), ((10396, 10431), 'pandas.DataFrame', 'pd.DataFrame', (['[initialization_data]'], {}), '([initialization_data])\n', (10408, 10431), True, 'import pandas as pd\n'), ((12464, 12484), 'pandas.DataFrame', 'pd.DataFrame', (['[data]'], {}), '([data])\n', (12476, 12484), True, 'import pandas as pd\n'), ((13049, 13055), 'time.time', 'time', ([], {}), '()\n', (13053, 13055), False, 'from time import time, sleep\n'), ((486, 528), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['unix_time'], {}), '(unix_time)\n', (517, 528), False, 'import datetime\n'), ((1132, 1324), 'openai.Completion.createChatCompletion', 'openai.Completion.createChatCompletion', ([], {'model': 'engine', 'prompt': 'prompt', 'temperature': 'temp', 'max_tokens': 'tokens', 'top_p': 'top_p', 'frequency_penalty': 'freq_pen', 'presence_penalty': 'pres_pen', 'stop': 'stop'}), '(model=engine, prompt=prompt,\n temperature=temp, max_tokens=tokens, top_p=top_p, frequency_penalty=\n freq_pen, presence_penalty=pres_pen, stop=stop)\n', (1170, 1324), False, 'import openai\n'), ((1526, 1555), 're.sub', 're.sub', (["'[\\r\\n]+'", '"""\n"""', 'text'], {}), "('[\\r\\n]+', '\\n', text)\n", (1532, 1555), False, 'import re\n'), ((1577, 1604), 're.sub', 're.sub', (['"""[\t ]+"""', '""" """', 'text'], {}), "('[\\t ]+', ' ', text)\n", (1583, 1604), False, 'import re\n'), ((11026, 11067), 'json.dumps', 'json.dumps', (['initialization_data'], {'indent': '(4)'}), '(initialization_data, indent=4)\n', (11036, 11067), False, 'import json\n'), ((13134, 13141), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (13139, 13141), False, 'from uuid import uuid4\n'), ((1646, 1652), 'time.time', 'time', ([], {}), '()\n', (1650, 1652), False, 'from time import time, sleep\n'), ((1672, 1699), 'os.path.exists', 'os.path.exists', (['"""gpt3_logs"""'], {}), "('gpt3_logs')\n", (1686, 1699), False, 'import os\n'), ((1719, 1743), 'os.makedirs', 'os.makedirs', (['"""gpt3_logs"""'], {}), "('gpt3_logs')\n", (1730, 1743), False, 'import os\n'), ((2072, 2080), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2077, 2080), False, 'from time import time, sleep\n')]
import logging import chainlit as cl import lancedb import pandas as pd from langchain import LLMChain from langchain.agents.agent_toolkits import create_conversational_retrieval_agent from langchain.agents.agent_toolkits import create_retriever_tool from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from langchain.schema import SystemMessage from langchain.vectorstores import LanceDB OPENAI_MODEL = "gpt-3.5-turbo-16k" # "gpt-3.5-turbo" logger = logging.getLogger(__name__) recipes = pd.read_pickle("data/preprocessed/recipes.pkl") recipes.drop_duplicates(subset=["id"], inplace=True) # Must for this dataset recipes.drop("target", axis=1, inplace=True) uri = "dataset/chainlit-recipes-lancedb" db = lancedb.connect(uri) table = db.create_table("recipes", recipes, mode="overwrite") @cl.on_chat_start async def main(): embeddings = OpenAIEmbeddings() docsearch = await cl.make_async(LanceDB)(connection=table, embedding=embeddings) llm = ChatOpenAI(model=OPENAI_MODEL, temperature=0) tool = create_retriever_tool( docsearch.as_retriever(search_kwargs={"k": 10}), # kan kalle denne dynamisk for menyer "recommend_recipes_or_menus", "Recommends dinner recipes or menus based on user preferences. Invocations must be in norwegian.", ) tools = [tool] system_message = SystemMessage( content=( """You are a recommender chatting with the user to provide dinner recipe recommendation. You must follow the instructions below during chat. You can recommend either a recipe plan for a week or single recipes. If you do not have enough information about user preference, you should ask the user for his preference. If you have enough information about user preference, you can give recommendation. The recommendation list can contain items that the dialog mentioned before. Recommendations are given by using the tool recommend_recipes_or_menus with a query you think matches the conversation and user preferences. The query must be in norwegian.""" ) ) qa = create_conversational_retrieval_agent( llm, tools, system_message=system_message, remember_intermediate_steps=True, # max_tokens_limit=4000, verbose=True, ) # Store the chain in the user session cl.user_session.set("llm_chain", qa) @cl.on_message async def main(message: cl.Message): print(message) # Retrieve the chain from the user session llm_chain = cl.user_session.get("llm_chain") # type: LLMChain # Call the chain asynchronously res = await llm_chain.acall(message.content, callbacks=[cl.AsyncLangchainCallbackHandler()]) # Do any post-processing here await cl.Message(content=res["output"]).send() # HOW TO RUN: chainlit run app.py -w
[ "lancedb.connect" ]
[((498, 525), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'import logging\n'), ((537, 584), 'pandas.read_pickle', 'pd.read_pickle', (['"""data/preprocessed/recipes.pkl"""'], {}), "('data/preprocessed/recipes.pkl')\n", (551, 584), True, 'import pandas as pd\n'), ((755, 775), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (770, 775), False, 'import lancedb\n'), ((893, 911), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (909, 911), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1007, 1052), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'OPENAI_MODEL', 'temperature': '(0)'}), '(model=OPENAI_MODEL, temperature=0)\n', (1017, 1052), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1375, 2106), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a recommender chatting with the user to provide dinner recipe recommendation. You must follow the instructions below during chat. You can recommend either a recipe plan for a week or single recipes. \n If you do not have enough information about user preference, you should ask the user for his preference.\n If you have enough information about user preference, you can give recommendation. The recommendation list can contain items that the dialog mentioned before.\n Recommendations are given by using the tool recommend_recipes_or_menus with a query you think matches the conversation and user preferences. The query must be in norwegian."""'}), '(content=\n """You are a recommender chatting with the user to provide dinner recipe recommendation. You must follow the instructions below during chat. You can recommend either a recipe plan for a week or single recipes. \n If you do not have enough information about user preference, you should ask the user for his preference.\n If you have enough information about user preference, you can give recommendation. The recommendation list can contain items that the dialog mentioned before.\n Recommendations are given by using the tool recommend_recipes_or_menus with a query you think matches the conversation and user preferences. 
The query must be in norwegian."""\n )\n', (1388, 2106), False, 'from langchain.schema import SystemMessage\n'), ((2145, 2278), 'langchain.agents.agent_toolkits.create_conversational_retrieval_agent', 'create_conversational_retrieval_agent', (['llm', 'tools'], {'system_message': 'system_message', 'remember_intermediate_steps': '(True)', 'verbose': '(True)'}), '(llm, tools, system_message=\n system_message, remember_intermediate_steps=True, verbose=True)\n', (2182, 2278), False, 'from langchain.agents.agent_toolkits import create_conversational_retrieval_agent\n'), ((2401, 2437), 'chainlit.user_session.set', 'cl.user_session.set', (['"""llm_chain"""', 'qa'], {}), "('llm_chain', qa)\n", (2420, 2437), True, 'import chainlit as cl\n'), ((2574, 2606), 'chainlit.user_session.get', 'cl.user_session.get', (['"""llm_chain"""'], {}), "('llm_chain')\n", (2593, 2606), True, 'import chainlit as cl\n'), ((934, 956), 'chainlit.make_async', 'cl.make_async', (['LanceDB'], {}), '(LanceDB)\n', (947, 956), True, 'import chainlit as cl\n'), ((2804, 2837), 'chainlit.Message', 'cl.Message', ([], {'content': "res['output']"}), "(content=res['output'])\n", (2814, 2837), True, 'import chainlit as cl\n'), ((2722, 2756), 'chainlit.AsyncLangchainCallbackHandler', 'cl.AsyncLangchainCallbackHandler', ([], {}), '()\n', (2754, 2756), True, 'import chainlit as cl\n')]
import streamlit as st
import pandas as pd
import json
import requests
from pathlib import Path
from datetime import datetime
from jinja2 import Template
import lancedb
import sqlite3

from services.lancedb_notes import IndexDocumentsNotes

st.set_page_config(layout='wide', page_title='Notes')


@st.cache_data
def summarize_text(text, prompt):
    system_prompt = '\n'.join(prompt['system_prompt'])
    content_prompt = Template('\n'.join(prompt['content_prompt']))
    content_prompt = content_prompt.render({'text': text})
    results = requests.post('http://localhost:8001/summarize',
                            json={'system_prompt': system_prompt, 'content_prompt': content_prompt})
    return results.json()['summary']


# @st.cache_data
def keyphrase_text(text, strength_cutoff=0.3):
    results = requests.post('http://localhost:8001/phrases', json={'text': text})
    results = results.json()
    phrases = list()
    for phrase, strength in zip(results['phrases'], results['strengths']):
        if strength >= strength_cutoff:
            phrases.append(phrase)
    return phrases


@st.cache_data
def entities_extract(text):
    results = requests.post('http://localhost:8001/ner', json={'text': text})
    # st.write(results.json()['entities'])
    return results.json()['entities']


@st.cache_data
def store_note(note):
    with open('tmp_json.json', 'w') as f:
        json.dump(note, f)
    return note


notes_folder = Path('data/notes')
collections_folder = Path('data/collections')
tmp_folder = Path('data/tmp')
config_folder = Path('data/config')
with open(config_folder.joinpath('prompt_templates.json'), 'r') as f:
    prompt_options = json.load(f)

index_folder = Path('indexes')
sqlite_location = Path('data/indexes/documents.sqlite')
lance_index = lancedb.connect(index_folder)
available_indexes = lance_index.table_names()

selected_collections = st.multiselect('Which note collections to load', options=available_indexes)
index_to_search = st.selectbox(label='Available Indexes', options=available_indexes)
prompt_option_choices = [x['Name'] for x in prompt_options]

for collection_idx, collection_name in enumerate(selected_collections):
    sqlite_conn = sqlite3.connect(sqlite_location)
    notes = sqlite_conn.execute(f"""SELECT * from {collection_name}""").fetchall()
    fields = sqlite_conn.execute(f"PRAGMA table_info({collection_name})").fetchall()
    fields = [x[1] for x in fields]
    notes = [dict(zip(fields, note)) for note in notes]
    for note in notes:
        note['metadata'] = json.loads(note['metadata'])

    with st.expander(collection_name):
        st.markdown("""Batch processing is possible. 
                    Select what you want and whether you want to overwrite the files or save to a new collection.\n\n*If you want to overwrite, leave the collection name as is.*""")
        batch_phrase, batch_entity, batch_summary = st.columns([1, 1, 1])
        with batch_phrase:
            batch_phrase_extract = st.toggle('Batch Phrase Extract', key=f'batch_phrase_extract_{collection_name}')
        with batch_entity:
            batch_entity_extract = st.toggle('Batch Entity Extract', key=f'batch_entity_extract_{collection_name}')
        with batch_summary:
            batch_summary_extract = st.toggle('Batch Summary Extract', key=f'batch_summary_extract_{collection_name}')

        selected_prompt_name = st.selectbox("Which prompt template?", prompt_option_choices, index=0)
        selected_prompt = prompt_options[prompt_option_choices.index(selected_prompt_name)]
        print(selected_prompt)
        save_collection_name = st.text_input('Saved Notes Collection Name', value=collection_name, key=f'batch_collection_save_{collection_name}')

        if st.button('Batch Process!', key=f'batch_process_{collection_name}'):
            progress_text = "Processing Progress (May take some time if summarizing)"
            batch_progress_bar = st.progress(0, text=progress_text)
            for i, note in enumerate(notes, start=1):
                if batch_entity_extract:
                    entities = entities_extract(note['text'])
                    note['entities'] = entities
                if batch_summary_extract:
                    note['summary'] = summarize_text(note['text'], selected_prompt).strip()
                batch_progress_bar.progress(i / len(notes), text=progress_text)
            st.write("Collection Processed!")

        with st.container():
            for index, note in enumerate(notes):
                st.markdown(f"**:blue[{note['title']}]**")
                if st.toggle('Show Note', key=f'show_note_{collection_name}_{index}'):
                    text_col, note_col = st.columns([0.6, 0.4])
                    with text_col:
                        st.markdown(f"**Date:** {note['date']}")
                        st.markdown(f"**Title:** {note['title']}")
                        if 'tags' in note and len(note['tags']) > 0:
                            st.markdown(f"**Tags:** {note['tags']}")
                        if 'phrases' in note['metadata']:
                            st.markdown(f"**Keyphrases:** {note['metadata']['phrases']}")
                        if 'entities' in note['metadata']:
                            st.markdown(f"Entities:** {note['metadata']['entities']}")
                        st.markdown("**Text**")
                        st.markdown(note['text'].replace('\n', '\n\n'))
                        st.json(note['metadata'], expanded=False)
                    with note_col:
                        ## Create session state for the text
                        ## add button to make rest api call to populate and then update text
                        ## Add button to save the note
                        save_note, local_collection = st.columns([1, 3])
                        with save_note:
                            _save_note = st.button('Save', key=f'save_note_{collection_name}_{index}')

                        ### Keyphrase extraction using Keybert/Keyphrase Vectorizers/Spacy NLP
                        if st.toggle('\nPhrase Extract', key=f'phrase_extract_{collection_name}_{index}'):
                            phrases = keyphrase_text(note['text'])
                            if 'phrases' not in note['metadata']:
                                note['metadata']['phrases'] = ','.join(phrases)
                            else:
                                note['metadata']['phrases'] = note['metadata']['phrases'] + '\n' + ','.join(phrases)
                        if 'phrases' in note['metadata']:
                            note['metadata']['phrases'] = st.text_area('Keyphrases', value=note['metadata']['phrases'], height=100, key=f'phrase_input_{collection_name}_{index}')
                        else:
                            note['metadata']['phrases'] = st.text_area('Keyphrases', value='', height=100, key=f'phrase_input_{collection_name}_{index}')

                        ### Entity extraction using Spacy NLP backend
                        if st.toggle('Entity Extract', key=f'entity_extract_{collection_name}_{index}'):
                            if 'entities' not in note['metadata']:
                                note['metadata']['entities'] = dict()
                            entities = entities_extract(note['text'])
                            note['metadata']['entities'].update(entities)
                            # st.write(note['metadata']['entities'])
                        entities_formatted = ''
                        if 'entities' in note['metadata']:
                            entities_formatted = ''
                            for ent_type, ents in note['metadata']['entities'].items():
                                ents_text = ', '.join(ents)
                                entities_formatted += f'{ent_type}: {ents_text};\n\n'
                            entities_formatted = entities_formatted.strip()
                            entities_formatted = st.text_area('Entities', value=entities_formatted, height=200, key=f'entity_input_{collection_name}_{index}')
                        else:
                            entities = st.text_area('Entities', value='', height=200, key=f'entity_input_{collection_name}_{index}')
                        note_json = dict()
                        for entity in entities_formatted.split(';'):
                            if len(entity) == 0:
                                continue
                            entity_type, entity_values = entity.split(':')
                            entity_values = [x.strip() for x in entity_values.split(',')]
                            note_json[entity_type.strip()] = entity_values
                        note['metadata']['entities'] = note_json

                        #### Summarization using Llama CPP backend
                        selected_prompt_name = st.selectbox("Which prompt template?", prompt_option_choices, index=0, key=f'doc_prompt_template_{collection_name}_{index}')
                        selected_prompt = prompt_options[prompt_option_choices.index(selected_prompt_name)]
                        if st.toggle('Summarize', key=f'summary_extract_{collection_name}_{index}'):
                            if 'summary' not in note['metadata']:
                                note['metadata']['summary'] = ''
                            summary = summarize_text(note['text'], selected_prompt).strip()
                            note['metadata']['summary'] = summary
                        if 'summary' in note['metadata']:
                            note['metadata']['summary'] = st.text_area('Summary', value=note['metadata']['summary'], height=500, key=f'summary_input_{collection_name}_{index}')
                        else:
                            note['metadata']['summary'] = st.text_area('Summary', value='', height=500, key=f'summary_input_{collection_name}_{index}')

                        if _save_note:
                            note['metadata'] = json.dumps(note['metadata'])
                            lance_table = lance_index.open_table(collection_name)
                            st.write(note['uuid'])
                            # LanceDB current can't (or more likely I don't know) how to update its metadata fields
                            # Sqlite will be used instead as it's the document repository anyways
                            # To create searchable notes, I'll have to think up something with lancedb_notes
                            # lance_table.update(where=f"uuid =' {note['uuid']}'", values={'metadata':note['metadata']})
                            sqlite_conn.execute(f"""UPDATE {collection_name} SET metadata='{note['metadata'].replace("'", "''")}' WHERE uuid='{note['uuid']}'""")
                            sqlite_conn.commit()

with st.sidebar:
    new_collection_name = st.text_input(label='New Collection Name', value='')
    if st.button('Create Collection'):
        collections_folder.joinpath(new_collection_name).mkdir(parents=True, exist_ok=True)
        st.rerun()
[ "lancedb.connect" ]
[((240, 293), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""', 'page_title': '"""Notes"""'}), "(layout='wide', page_title='Notes')\n", (258, 293), True, 'import streamlit as st\n'), ((1504, 1522), 'pathlib.Path', 'Path', (['"""data/notes"""'], {}), "('data/notes')\n", (1508, 1522), False, 'from pathlib import Path\n'), ((1544, 1568), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (1548, 1568), False, 'from pathlib import Path\n'), ((1582, 1598), 'pathlib.Path', 'Path', (['"""data/tmp"""'], {}), "('data/tmp')\n", (1586, 1598), False, 'from pathlib import Path\n'), ((1615, 1634), 'pathlib.Path', 'Path', (['"""data/config"""'], {}), "('data/config')\n", (1619, 1634), False, 'from pathlib import Path\n'), ((1756, 1771), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (1760, 1771), False, 'from pathlib import Path\n'), ((1790, 1827), 'pathlib.Path', 'Path', (['"""data/indexes/documents.sqlite"""'], {}), "('data/indexes/documents.sqlite')\n", (1794, 1827), False, 'from pathlib import Path\n'), ((1843, 1872), 'lancedb.connect', 'lancedb.connect', (['index_folder'], {}), '(index_folder)\n', (1858, 1872), False, 'import lancedb\n'), ((1942, 2017), 'streamlit.multiselect', 'st.multiselect', (['"""Which note collections to load"""'], {'options': 'available_indexes'}), "('Which note collections to load', options=available_indexes)\n", (1956, 2017), True, 'import streamlit as st\n'), ((2037, 2103), 'streamlit.selectbox', 'st.selectbox', ([], {'label': '"""Available Indexes"""', 'options': 'available_indexes'}), "(label='Available Indexes', options=available_indexes)\n", (2049, 2103), True, 'import streamlit as st\n'), ((557, 682), 'requests.post', 'requests.post', (['"""http://localhost:8001/summarize"""'], {'json': "{'system_prompt': system_prompt, 'content_prompt': content_prompt}"}), "('http://localhost:8001/summarize', json={'system_prompt':\n system_prompt, 'content_prompt': content_prompt})\n", (570, 682), False, 'import requests\n'), ((821, 888), 'requests.post', 'requests.post', (['"""http://localhost:8001/phrases"""'], {'json': "{'text': text}"}), "('http://localhost:8001/phrases', json={'text': text})\n", (834, 888), False, 'import requests\n'), ((1193, 1256), 'requests.post', 'requests.post', (['"""http://localhost:8001/ner"""'], {'json': "{'text': text}"}), "('http://localhost:8001/ner', json={'text': text})\n", (1206, 1256), False, 'import requests\n'), ((1726, 1738), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1735, 1738), False, 'import json\n'), ((2256, 2288), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_location'], {}), '(sqlite_location)\n', (2271, 2288), False, 'import sqlite3\n'), ((11349, 11401), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""New Collection Name"""', 'value': '""""""'}), "(label='New Collection Name', value='')\n", (11362, 11401), True, 'import streamlit as st\n'), ((11409, 11439), 'streamlit.button', 'st.button', (['"""Create Collection"""'], {}), "('Create Collection')\n", (11418, 11439), True, 'import streamlit as st\n'), ((1452, 1470), 'json.dump', 'json.dump', (['note', 'f'], {}), '(note, f)\n', (1461, 1470), False, 'import json\n'), ((2599, 2627), 'json.loads', 'json.loads', (["note['metadata']"], {}), "(note['metadata'])\n", (2609, 2627), False, 'import json\n'), ((2638, 2666), 'streamlit.expander', 'st.expander', (['collection_name'], {}), '(collection_name)\n', (2649, 2666), True, 'import streamlit as st\n'), ((2676, 2890), 'streamlit.markdown', 
'st.markdown', (['"""Batch processing is possible. Select what you want and whether you want to overwrite the files or save to a new collection.\n\n*If you want to overwrite, leave the collection name as is.*"""'], {}), '(\n """Batch processing is possible. Select what you want and whether you want to overwrite the files or save to a new collection.\n\n*If you want to overwrite, leave the collection name as is.*"""\n )\n', (2687, 2890), True, 'import streamlit as st\n'), ((2935, 2956), 'streamlit.columns', 'st.columns', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (2945, 2956), True, 'import streamlit as st\n'), ((3420, 3490), 'streamlit.selectbox', 'st.selectbox', (['"""Which prompt template?"""', 'prompt_option_choices'], {'index': '(0)'}), "('Which prompt template?', prompt_option_choices, index=0)\n", (3432, 3490), True, 'import streamlit as st\n'), ((3645, 3765), 'streamlit.text_input', 'st.text_input', (['"""Saved Notes Collection Name"""'], {'value': 'collection_name', 'key': 'f"""batch_collection_save_{collection_name}"""'}), "('Saved Notes Collection Name', value=collection_name, key=\n f'batch_collection_save_{collection_name}')\n", (3658, 3765), True, 'import streamlit as st\n'), ((3773, 3840), 'streamlit.button', 'st.button', (['"""Batch Process!"""'], {'key': 'f"""batch_process_{collection_name}"""'}), "('Batch Process!', key=f'batch_process_{collection_name}')\n", (3782, 3840), True, 'import streamlit as st\n'), ((11541, 11551), 'streamlit.rerun', 'st.rerun', ([], {}), '()\n', (11549, 11551), True, 'import streamlit as st\n'), ((3017, 3102), 'streamlit.toggle', 'st.toggle', (['"""Batch Phrase Extract"""'], {'key': 'f"""batch_phrase_extract_{collection_name}"""'}), "('Batch Phrase Extract', key=f'batch_phrase_extract_{collection_name}'\n )\n", (3026, 3102), True, 'import streamlit as st\n'), ((3160, 3245), 'streamlit.toggle', 'st.toggle', (['"""Batch Entity Extract"""'], {'key': 'f"""batch_entity_extract_{collection_name}"""'}), "('Batch Entity Extract', key=f'batch_entity_extract_{collection_name}'\n )\n", (3169, 3245), True, 'import streamlit as st\n'), ((3305, 3392), 'streamlit.toggle', 'st.toggle', (['"""Batch Summary Extract"""'], {'key': 'f"""batch_summary_extract_{collection_name}"""'}), "('Batch Summary Extract', key=\n f'batch_summary_extract_{collection_name}')\n", (3314, 3392), True, 'import streamlit as st\n'), ((3961, 3995), 'streamlit.progress', 'st.progress', (['(0)'], {'text': 'progress_text'}), '(0, text=progress_text)\n', (3972, 3995), True, 'import streamlit as st\n'), ((4427, 4460), 'streamlit.write', 'st.write', (['"""Collection Processed!"""'], {}), "('Collection Processed!')\n", (4435, 4460), True, 'import streamlit as st\n'), ((4475, 4489), 'streamlit.container', 'st.container', ([], {}), '()\n', (4487, 4489), True, 'import streamlit as st\n'), ((4556, 4598), 'streamlit.markdown', 'st.markdown', (['f"""**:blue[{note[\'title\']}]**"""'], {}), '(f"**:blue[{note[\'title\']}]**")\n', (4567, 4598), True, 'import streamlit as st\n'), ((4618, 4684), 'streamlit.toggle', 'st.toggle', (['"""Show Note"""'], {'key': 'f"""show_note_{collection_name}_{index}"""'}), "('Show Note', key=f'show_note_{collection_name}_{index}')\n", (4627, 4684), True, 'import streamlit as st\n'), ((4727, 4749), 'streamlit.columns', 'st.columns', (['[0.6, 0.4]'], {}), '([0.6, 0.4])\n', (4737, 4749), True, 'import streamlit as st\n'), ((4808, 4848), 'streamlit.markdown', 'st.markdown', (['f"""**Date:** {note[\'date\']}"""'], {}), '(f"**Date:** {note[\'date\']}")\n', (4819, 4848), True, 'import 
streamlit as st\n'), ((4873, 4915), 'streamlit.markdown', 'st.markdown', (['f"""**Title:** {note[\'title\']}"""'], {}), '(f"**Title:** {note[\'title\']}")\n', (4884, 4915), True, 'import streamlit as st\n'), ((5373, 5396), 'streamlit.markdown', 'st.markdown', (['"""**Text**"""'], {}), "('**Text**')\n", (5384, 5396), True, 'import streamlit as st\n'), ((5492, 5533), 'streamlit.json', 'st.json', (["note['metadata']"], {'expanded': '(False)'}), "(note['metadata'], expanded=False)\n", (5499, 5533), True, 'import streamlit as st\n'), ((5833, 5851), 'streamlit.columns', 'st.columns', (['[1, 3]'], {}), '([1, 3])\n', (5843, 5851), True, 'import streamlit as st\n'), ((6118, 6196), 'streamlit.toggle', 'st.toggle', (['"""\nPhrase Extract"""'], {'key': 'f"""phrase_extract_{collection_name}_{index}"""'}), "('\\nPhrase Extract', key=f'phrase_extract_{collection_name}_{index}')\n", (6127, 6196), True, 'import streamlit as st\n'), ((7211, 7287), 'streamlit.toggle', 'st.toggle', (['"""Entity Extract"""'], {'key': 'f"""entity_extract_{collection_name}_{index}"""'}), "('Entity Extract', key=f'entity_extract_{collection_name}_{index}')\n", (7220, 7287), True, 'import streamlit as st\n'), ((9176, 9305), 'streamlit.selectbox', 'st.selectbox', (['"""Which prompt template?"""', 'prompt_option_choices'], {'index': '(0)', 'key': 'f"""doc_prompt_template_{collection_name}_{index}"""'}), "('Which prompt template?', prompt_option_choices, index=0, key=\n f'doc_prompt_template_{collection_name}_{index}')\n", (9188, 9305), True, 'import streamlit as st\n'), ((9496, 9568), 'streamlit.toggle', 'st.toggle', (['"""Summarize"""'], {'key': 'f"""summary_extract_{collection_name}_{index}"""'}), "('Summarize', key=f'summary_extract_{collection_name}_{index}')\n", (9505, 9568), True, 'import streamlit as st\n'), ((5013, 5053), 'streamlit.markdown', 'st.markdown', (['f"""**Tags:** {note[\'tags\']}"""'], {}), '(f"**Tags:** {note[\'tags\']}")\n', (5024, 5053), True, 'import streamlit as st\n'), ((5140, 5201), 'streamlit.markdown', 'st.markdown', (['f"""**Keyphrases:** {note[\'metadata\'][\'phrases\']}"""'], {}), '(f"**Keyphrases:** {note[\'metadata\'][\'phrases\']}")\n', (5151, 5201), True, 'import streamlit as st\n'), ((5289, 5347), 'streamlit.markdown', 'st.markdown', (['f"""Entities:** {note[\'metadata\'][\'entities\']}"""'], {}), '(f"Entities:** {note[\'metadata\'][\'entities\']}")\n', (5300, 5347), True, 'import streamlit as st\n'), ((5933, 5994), 'streamlit.button', 'st.button', (['"""Save"""'], {'key': 'f"""save_note_{collection_name}_{index}"""'}), "('Save', key=f'save_note_{collection_name}_{index}')\n", (5942, 5994), True, 'import streamlit as st\n'), ((6679, 6803), 'streamlit.text_area', 'st.text_area', (['"""Keyphrases"""'], {'value': "note['metadata']['phrases']", 'height': '(100)', 'key': 'f"""phrase_input_{collection_name}_{index}"""'}), "('Keyphrases', value=note['metadata']['phrases'], height=100,\n key=f'phrase_input_{collection_name}_{index}')\n", (6691, 6803), True, 'import streamlit as st\n'), ((6953, 7053), 'streamlit.text_area', 'st.text_area', (['"""Keyphrases"""'], {'value': '""""""', 'height': '(100)', 'key': 'f"""phrase_input_{collection_name}_{index}"""'}), "('Keyphrases', value='', height=100, key=\n f'phrase_input_{collection_name}_{index}')\n", (6965, 7053), True, 'import streamlit as st\n'), ((8153, 8267), 'streamlit.text_area', 'st.text_area', (['"""Entities"""'], {'value': 'entities_formatted', 'height': '(200)', 'key': 'f"""entity_input_{collection_name}_{index}"""'}), "('Entities', 
value=entities_formatted, height=200, key=\n f'entity_input_{collection_name}_{index}')\n", (8165, 8267), True, 'import streamlit as st\n'), ((8396, 8494), 'streamlit.text_area', 'st.text_area', (['"""Entities"""'], {'value': '""""""', 'height': '(200)', 'key': 'f"""entity_input_{collection_name}_{index}"""'}), "('Entities', value='', height=200, key=\n f'entity_input_{collection_name}_{index}')\n", (8408, 8494), True, 'import streamlit as st\n'), ((9975, 10098), 'streamlit.text_area', 'st.text_area', (['"""Summary"""'], {'value': "note['metadata']['summary']", 'height': '(500)', 'key': 'f"""summary_input_{collection_name}_{index}"""'}), "('Summary', value=note['metadata']['summary'], height=500, key=\n f'summary_input_{collection_name}_{index}')\n", (9987, 10098), True, 'import streamlit as st\n'), ((10245, 10343), 'streamlit.text_area', 'st.text_area', (['"""Summary"""'], {'value': '""""""', 'height': '(500)', 'key': 'f"""summary_input_{collection_name}_{index}"""'}), "('Summary', value='', height=500, key=\n f'summary_input_{collection_name}_{index}')\n", (10257, 10343), True, 'import streamlit as st\n'), ((10489, 10517), 'json.dumps', 'json.dumps', (["note['metadata']"], {}), "(note['metadata'])\n", (10499, 10517), False, 'import json\n'), ((10628, 10650), 'streamlit.write', 'st.write', (["note['uuid']"], {}), "(note['uuid'])\n", (10636, 10650), True, 'import streamlit as st\n')]
from langchain.vectorstores import LanceDB
import lancedb
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA

# load agents and tools modules
import pandas as pd
from io import StringIO
from langchain.tools.python.tool import PythonAstREPLTool
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain import LLMMathChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import CTransformers
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader

from scripts.load_llm import llm_openai


class HRChatbot:
    def __init__(self, df_path, text_data_path, user):
        self.df_path = df_path
        self.text_data_path = text_data_path
        self.user = user
        self.llm = llm_openai().llm
        self.df = None
        self.timekeeping_policy = None
        self.agent = None

        self.load_data()
        self.initialize_tools()

    def load_data(self):
        # Load text documents
        loader = TextLoader(self.text_data_path)
        docs = loader.load()

        # Split documents into smaller chunks
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=100,
            chunk_overlap=30,
        )
        documents = text_splitter.split_documents(docs)

        # Create Hugging Face embeddings
        embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-L6-v2",
            model_kwargs={"device": "cpu"},
        )

        # create lancedb vectordb
        db = lancedb.connect("/tmp/lancedb")
        table = db.create_table(
            "pandas_docs",
            data=[
                {
                    "vector": embeddings.embed_query("Hello World"),
                    "text": "Hello World",
                    "id": "1",
                }
            ],
            mode="overwrite",
        )
        self.vectorstore = LanceDB.from_documents(
            documents, embeddings, connection=table
        )

        self.df = pd.read_csv(self.df_path)

    def initialize_tools(self):
        # Initialize retrieval question-answering model
        # Initialize tools for the agent
        timekeeping_policy = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",
            retriever=self.vectorstore.as_retriever(),
        )
        python = PythonAstREPLTool(locals={"df": self.df})
        calculator = LLMMathChain.from_llm(llm=self.llm, verbose=True)

        # Set up variables and descriptions for the tools
        user = self.user
        df_columns = self.df.columns.to_list()
        tools = [
            Tool(
                name="Timekeeping Policies",
                func=timekeeping_policy.run,
                description="""
                Useful for when you need to answer questions about employee timekeeping policies.

                <user>: What is the policy on unused vacation leave?
                <assistant>: I need to check the timekeeping policies to answer this question.
                <assistant>: Action: Timekeeping Policies
                <assistant>: Action Input: Vacation Leave Policy - Unused Leave
                ...
                """,
            ),
            Tool(
                name="Employee Data",
                func=python.run,
                description=f"""
                Useful for when you need to answer questions about employee data stored in pandas dataframe 'df'. 
                Run python pandas operations on 'df' to help you get the right answer.
                'df' has the following columns: {df_columns}

                <user>: How many Sick Leave do I have left?
                <assistant>: df[df['name'] == '{user}']['vacation_leave']
                <assistant>: You have n vacation_leave left. 
                """,
            ),
            Tool(
                name="Calculator",
                func=calculator.run,
                description=f"""
                Useful when you need to do math operations or arithmetic operations.
                """,
            ),
        ]

        # Initialize the LLM agent
        agent_kwargs = {
            "prefix": f"You are friendly HR assistant. You are tasked to assist the current user: {user} on questions related to HR. ..."
        }
        self.timekeeping_policy = timekeeping_policy
        self.agent = initialize_agent(
            tools,
            self.llm,
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True,
            agent_kwargs=agent_kwargs,
        )

    def get_response(self, user_input):
        response = self.agent.run(user_input)
        return response
[ "lancedb.connect" ]
[((1254, 1285), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.text_data_path'], {}), '(self.text_data_path)\n', (1264, 1285), False, 'from langchain.document_loaders import TextLoader\n'), ((1386, 1450), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(100)', 'chunk_overlap': '(30)'}), '(chunk_size=100, chunk_overlap=30)\n', (1416, 1450), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1605, 1715), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-MiniLM-L6-v2"""', 'model_kwargs': "{'device': 'cpu'}"}), "(model_name='sentence-transformers/all-MiniLM-L6-v2',\n model_kwargs={'device': 'cpu'})\n", (1626, 1715), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1795, 1826), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (1810, 1826), False, 'import lancedb\n'), ((2167, 2230), 'langchain.vectorstores.LanceDB.from_documents', 'LanceDB.from_documents', (['documents', 'embeddings'], {'connection': 'table'}), '(documents, embeddings, connection=table)\n', (2189, 2230), False, 'from langchain.vectorstores import LanceDB\n'), ((2272, 2297), 'pandas.read_csv', 'pd.read_csv', (['self.df_path'], {}), '(self.df_path)\n', (2283, 2297), True, 'import pandas as pd\n'), ((2626, 2667), 'langchain.tools.python.tool.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'locals': "{'df': self.df}"}), "(locals={'df': self.df})\n", (2643, 2667), False, 'from langchain.tools.python.tool import PythonAstREPLTool\n'), ((2689, 2738), 'langchain.LLMMathChain.from_llm', 'LLMMathChain.from_llm', ([], {'llm': 'self.llm', 'verbose': '(True)'}), '(llm=self.llm, verbose=True)\n', (2710, 2738), False, 'from langchain import LLMMathChain\n'), ((4670, 4794), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'self.llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(True)', 'agent_kwargs': 'agent_kwargs'}), '(tools, self.llm, agent=AgentType.\n ZERO_SHOT_REACT_DESCRIPTION, verbose=True, agent_kwargs=agent_kwargs)\n', (4686, 4794), False, 'from langchain.agents import initialize_agent, Tool\n'), ((1018, 1030), 'scripts.load_llm.llm_openai', 'llm_openai', ([], {}), '()\n', (1028, 1030), False, 'from scripts.load_llm import llm_openai\n'), ((2900, 3430), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Timekeeping Policies"""', 'func': 'timekeeping_policy.run', 'description': '"""\n Useful for when you need to answer questions about employee timekeeping policies.\n\n <user>: What is the policy on unused vacation leave?\n <assistant>: I need to check the timekeeping policies to answer this question.\n <assistant>: Action: Timekeeping Policies\n <assistant>: Action Input: Vacation Leave Policy - Unused Leave\n ...\n """'}), '(name=\'Timekeeping Policies\', func=timekeeping_policy.run, description=\n """\n Useful for when you need to answer questions about employee timekeeping policies.\n\n <user>: What is the policy on unused vacation leave?\n <assistant>: I need to check the timekeeping policies to answer this question.\n <assistant>: Action: Timekeeping Policies\n <assistant>: Action Input: Vacation Leave Policy - Unused Leave\n ...\n """\n )\n', (2904, 3430), False, 'from langchain.agents import initialize_agent, Tool\n'), ((3497, 4078), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Employee Data"""', 'func': 'python.run', 'description': 'f"""\n 
Useful for when you need to answer questions about employee data stored in pandas dataframe \'df\'. \n Run python pandas operations on \'df\' to help you get the right answer.\n \'df\' has the following columns: {df_columns}\n \n <user>: How many Sick Leave do I have left?\n <assistant>: df[df[\'name\'] == \'{user}\'][\'vacation_leave\']\n <assistant>: You have n vacation_leave left. \n """'}), '(name=\'Employee Data\', func=python.run, description=\n f"""\n Useful for when you need to answer questions about employee data stored in pandas dataframe \'df\'. \n Run python pandas operations on \'df\' to help you get the right answer.\n \'df\' has the following columns: {df_columns}\n \n <user>: How many Sick Leave do I have left?\n <assistant>: df[df[\'name\'] == \'{user}\'][\'vacation_leave\']\n <assistant>: You have n vacation_leave left. \n """\n )\n', (3501, 4078), False, 'from langchain.agents import initialize_agent, Tool\n'), ((4145, 4322), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Calculator"""', 'func': 'calculator.run', 'description': 'f"""\n Useful when you need to do math operations or arithmetic operations.\n """'}), '(name=\'Calculator\', func=calculator.run, description=\n f"""\n Useful when you need to do math operations or arithmetic operations.\n """\n )\n', (4149, 4322), False, 'from langchain.agents import initialize_agent, Tool\n')]
import lancedb
import numpy as np
import pandas as pd

global data
data = []
global table
table = None


def get_recommendations(title):
    pd_data = pd.DataFrame(data)
    # Table Search
    result = (
        table.search(pd_data[pd_data["title"] == title]["vector"].values[0])
        .limit(5)
        .to_pandas()
    )
    # Get IMDB links
    links = pd.read_csv(
        "./ml-latest-small/links.csv",
        header=0,
        names=["movie id", "imdb id", "tmdb id"],
        converters={"imdb id": str},
    )
    ret = result["title"].values.tolist()
    # Loop to add links
    for i in range(len(ret)):
        link = links[links["movie id"] == result["id"].values[i]]["imdb id"].values[0]
        link = "https://www.imdb.com/title/tt" + link
        ret[i] = [ret[i], link]
    return ret


if __name__ == "__main__":
    # Load and prepare data
    ratings = pd.read_csv(
        "./ml-latest-small/ratings.csv",
        header=None,
        names=["user id", "movie id", "rating", "timestamp"],
    )
    ratings = ratings.drop(columns=["timestamp"])
    ratings = ratings.drop(0)
    ratings["rating"] = ratings["rating"].values.astype(np.float32)
    ratings["user id"] = ratings["user id"].values.astype(np.int32)
    ratings["movie id"] = ratings["movie id"].values.astype(np.int32)

    reviewmatrix = ratings.pivot(
        index="user id", columns="movie id", values="rating"
    ).fillna(0)

    # SVD
    matrix = reviewmatrix.values
    u, s, vh = np.linalg.svd(matrix, full_matrices=False)
    vectors = np.rot90(np.fliplr(vh))

    # Metadata
    movies = pd.read_csv(
        "./ml-latest-small/movies.csv", header=0, names=["movie id", "title", "genres"]
    )
    movies = movies[movies["movie id"].isin(reviewmatrix.columns)]

    data = []
    for i in range(len(movies)):
        data.append(
            {
                "id": movies.iloc[i]["movie id"],
                "title": movies.iloc[i]["title"],
                "vector": vectors[i],
                "genre": movies.iloc[i]["genres"],
            }
        )

    # Connect to LanceDB
    db_url = "your-project-name"
    api_key = "sk_..."
    region = "us-east-1"
    db = lancedb.connect(db_url, api_key=api_key, region=region)

    try:
        table = db.create_table("movie_set", data=data)
    except:
        table = db.open_table("movie_set")

    print(get_recommendations("Moana (2016)"))
    print(get_recommendations("Rogue One: A Star Wars Story (2016)"))
[ "lancedb.connect" ]
[((152, 170), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (164, 170), True, 'import pandas as pd\n'), ((361, 488), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/links.csv"""'], {'header': '(0)', 'names': "['movie id', 'imdb id', 'tmdb id']", 'converters': "{'imdb id': str}"}), "('./ml-latest-small/links.csv', header=0, names=['movie id',\n 'imdb id', 'tmdb id'], converters={'imdb id': str})\n", (372, 488), True, 'import pandas as pd\n'), ((880, 995), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/ratings.csv"""'], {'header': 'None', 'names': "['user id', 'movie id', 'rating', 'timestamp']"}), "('./ml-latest-small/ratings.csv', header=None, names=['user id',\n 'movie id', 'rating', 'timestamp'])\n", (891, 995), True, 'import pandas as pd\n'), ((1480, 1522), 'numpy.linalg.svd', 'np.linalg.svd', (['matrix'], {'full_matrices': '(False)'}), '(matrix, full_matrices=False)\n', (1493, 1522), True, 'import numpy as np\n'), ((1591, 1687), 'pandas.read_csv', 'pd.read_csv', (['"""./ml-latest-small/movies.csv"""'], {'header': '(0)', 'names': "['movie id', 'title', 'genres']"}), "('./ml-latest-small/movies.csv', header=0, names=['movie id',\n 'title', 'genres'])\n", (1602, 1687), True, 'import pandas as pd\n'), ((2178, 2233), 'lancedb.connect', 'lancedb.connect', (['db_url'], {'api_key': 'api_key', 'region': 'region'}), '(db_url, api_key=api_key, region=region)\n', (2193, 2233), False, 'import lancedb\n'), ((1547, 1560), 'numpy.fliplr', 'np.fliplr', (['vh'], {}), '(vh)\n', (1556, 1560), True, 'import numpy as np\n')]
import lancedb
from datasets import load_dataset
import pandas as pd
import numpy as np

from hyperdemocracy.embedding.models import BGESmallEn


class Lance:
    def __init__(self):
        self.model = BGESmallEn()
        uri = "data/sample-lancedb"
        self.db = lancedb.connect(uri)

    def create_table(self):
        ds = load_dataset("hyperdemocracy/uscb.s1024.o256.bge-small-en", split="train")
        df = pd.DataFrame(ds)
        df.rename(columns={"vec": "vector"}, inplace=True)
        table = self.db.create_table("congress", data=df)
        return

    def query_table(self, queries, n=5) -> pd.DataFrame:
        q_embeddings = self.model.model.encode_queries(queries)
        table = self.db.open_table("congress")
        result = table.search(q_embeddings.reshape(384,)).limit(n).to_df()
        return result
[ "lancedb.connect" ]
[((203, 215), 'hyperdemocracy.embedding.models.BGESmallEn', 'BGESmallEn', ([], {}), '()\n', (213, 215), False, 'from hyperdemocracy.embedding.models import BGESmallEn\n'), ((270, 290), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (285, 290), False, 'import lancedb\n'), ((333, 407), 'datasets.load_dataset', 'load_dataset', (['"""hyperdemocracy/uscb.s1024.o256.bge-small-en"""'], {'split': '"""train"""'}), "('hyperdemocracy/uscb.s1024.o256.bge-small-en', split='train')\n", (345, 407), False, 'from datasets import load_dataset\n'), ((421, 437), 'pandas.DataFrame', 'pd.DataFrame', (['ds'], {}), '(ds)\n', (433, 437), True, 'import pandas as pd\n')]
import lancedb

uri = "test_data"
db = lancedb.connect(uri)
tbl = db.create_table(
    "my_table",
    data=[
        {"vector": [3.1, 4.1], "item": "foo", "price": 10.0},
        {"vector": [5.9, 26.5], "item": "bar", "price": 20.0},
    ],
)
[ "lancedb.connect" ]
[((38, 58), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (53, 58), False, 'import lancedb\n')]
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from unittest import mock

import lancedb as ldb
import numpy as np
import pandas as pd
import pytest

pytest.importorskip("lancedb.fts")
tantivy = pytest.importorskip("tantivy")


@pytest.fixture
def table(tmp_path) -> ldb.table.LanceTable:
    db = ldb.connect(tmp_path)
    vectors = [np.random.randn(128) for _ in range(100)]

    nouns = ("puppy", "car", "rabbit", "girl", "monkey")
    verbs = ("runs", "hits", "jumps", "drives", "barfs")
    adv = ("crazily.", "dutifully.", "foolishly.", "merrily.", "occasionally.")
    adj = ("adorable", "clueless", "dirty", "odd", "stupid")
    text = [
        " ".join(
            [
                nouns[random.randrange(0, 5)],
                verbs[random.randrange(0, 5)],
                adv[random.randrange(0, 5)],
                adj[random.randrange(0, 5)],
            ]
        )
        for _ in range(100)
    ]
    table = db.create_table(
        "test",
        data=pd.DataFrame(
            {
                "vector": vectors,
                "id": [i % 2 for i in range(100)],
                "text": text,
                "text2": text,
                "nested": [{"text": t} for t in text],
            }
        ),
    )
    return table


def test_create_index(tmp_path):
    index = ldb.fts.create_index(str(tmp_path / "index"), ["text"])
    assert isinstance(index, tantivy.Index)
    assert os.path.exists(str(tmp_path / "index"))


def test_populate_index(tmp_path, table):
    index = ldb.fts.create_index(str(tmp_path / "index"), ["text"])
    assert ldb.fts.populate_index(index, table, ["text"]) == len(table)


def test_search_index(tmp_path, table):
    index = ldb.fts.create_index(str(tmp_path / "index"), ["text"])
    ldb.fts.populate_index(index, table, ["text"])
    index.reload()
    results = ldb.fts.search_index(index, query="puppy", limit=10)
    assert len(results) == 2
    assert len(results[0]) == 10  # row_ids
    assert len(results[1]) == 10  # _distance


def test_create_index_from_table(tmp_path, table):
    table.create_fts_index("text")
    df = table.search("puppy").limit(10).select(["text"]).to_pandas()
    assert len(df) <= 10
    assert "text" in df.columns

    # Check whether it can be updated
    table.add(
        [
            {
                "vector": np.random.randn(128),
                "id": 101,
                "text": "gorilla",
                "text2": "gorilla",
                "nested": {"text": "gorilla"},
            }
        ]
    )

    with pytest.raises(ValueError, match="already exists"):
        table.create_fts_index("text")

    table.create_fts_index("text", replace=True)
    assert len(table.search("gorilla").limit(1).to_pandas()) == 1


def test_create_index_multiple_columns(tmp_path, table):
    table.create_fts_index(["text", "text2"])
    df = table.search("puppy").limit(10).to_pandas()
    assert len(df) == 10
    assert "text" in df.columns
    assert "text2" in df.columns


def test_empty_rs(tmp_path, table, mocker):
    table.create_fts_index(["text", "text2"])
    mocker.patch("lancedb.fts.search_index", return_value=([], []))
    df = table.search("puppy").limit(10).to_pandas()
    assert len(df) == 0


def test_nested_schema(tmp_path, table):
    table.create_fts_index("nested.text")
    rs = table.search("puppy").limit(10).to_list()
    assert len(rs) == 10


def test_search_index_with_filter(table):
    table.create_fts_index("text")
    orig_import = __import__

    def import_mock(name, *args):
        if name == "duckdb":
            raise ImportError
        return orig_import(name, *args)

    # no duckdb
    with mock.patch("builtins.__import__", side_effect=import_mock):
        rs = table.search("puppy").where("id=1").limit(10)
        # test schema
        assert rs.to_arrow().drop("score").schema.equals(table.schema)

        rs = rs.to_list()
        for r in rs:
            assert r["id"] == 1

    # yes duckdb
    rs2 = table.search("puppy").where("id=1").limit(10).to_list()
    for r in rs2:
        assert r["id"] == 1
    assert rs == rs2

    rs = table.search("puppy").where("id=1").with_row_id(True).limit(10).to_list()
    for r in rs:
        assert r["id"] == 1
        assert r["_rowid"] is not None


def test_null_input(table):
    table.add(
        [
            {
                "vector": np.random.randn(128),
                "id": 101,
                "text": None,
                "text2": None,
                "nested": {"text": None},
            }
        ]
    )
    table.create_fts_index("text")


def test_syntax(table):
    # https://github.com/lancedb/lancedb/issues/769
    table.create_fts_index("text")
    with pytest.raises(ValueError, match="Syntax Error"):
        table.search("they could have been dogs OR cats").limit(10).to_list()

    # these should work

    # terms queries
    table.search('"they could have been dogs" OR cats').limit(10).to_list()
    table.search("(they AND could) OR (have AND been AND dogs) OR cats").limit(
        10
    ).to_list()

    # phrase queries
    table.search("they could have been dogs OR cats").phrase_query().limit(10).to_list()
    table.search('"they could have been dogs OR cats"').limit(10).to_list()
    table.search('''"the cats OR dogs were not really 'pets' at all"''').limit(
        10
    ).to_list()
    table.search('the cats OR dogs were not really "pets" at all').phrase_query().limit(
        10
    ).to_list()
    table.search('the cats OR dogs were not really "pets" at all').phrase_query().limit(
        10
    ).to_list()
[ "lancedb.fts.search_index", "lancedb.fts.populate_index", "lancedb.connect" ]
[((716, 750), 'pytest.importorskip', 'pytest.importorskip', (['"""lancedb.fts"""'], {}), "('lancedb.fts')\n", (735, 750), False, 'import pytest\n'), ((761, 791), 'pytest.importorskip', 'pytest.importorskip', (['"""tantivy"""'], {}), "('tantivy')\n", (780, 791), False, 'import pytest\n'), ((864, 885), 'lancedb.connect', 'ldb.connect', (['tmp_path'], {}), '(tmp_path)\n', (875, 885), True, 'import lancedb as ldb\n'), ((2318, 2364), 'lancedb.fts.populate_index', 'ldb.fts.populate_index', (['index', 'table', "['text']"], {}), "(index, table, ['text'])\n", (2340, 2364), True, 'import lancedb as ldb\n'), ((2398, 2450), 'lancedb.fts.search_index', 'ldb.fts.search_index', (['index'], {'query': '"""puppy"""', 'limit': '(10)'}), "(index, query='puppy', limit=10)\n", (2418, 2450), True, 'import lancedb as ldb\n'), ((901, 921), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (916, 921), True, 'import numpy as np\n'), ((2143, 2189), 'lancedb.fts.populate_index', 'ldb.fts.populate_index', (['index', 'table', "['text']"], {}), "(index, table, ['text'])\n", (2165, 2189), True, 'import lancedb as ldb\n'), ((3096, 3145), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""already exists"""'}), "(ValueError, match='already exists')\n", (3109, 3145), False, 'import pytest\n'), ((4216, 4274), 'unittest.mock.patch', 'mock.patch', (['"""builtins.__import__"""'], {'side_effect': 'import_mock'}), "('builtins.__import__', side_effect=import_mock)\n", (4226, 4274), False, 'from unittest import mock\n'), ((5261, 5308), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Syntax Error"""'}), "(ValueError, match='Syntax Error')\n", (5274, 5308), False, 'import pytest\n'), ((2889, 2909), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (2904, 2909), True, 'import numpy as np\n'), ((4922, 4942), 'numpy.random.randn', 'np.random.randn', (['(128)'], {}), '(128)\n', (4937, 4942), True, 'import numpy as np\n'), ((1266, 1288), 'random.randrange', 'random.randrange', (['(0)', '(5)'], {}), '(0, 5)\n', (1282, 1288), False, 'import random\n'), ((1313, 1335), 'random.randrange', 'random.randrange', (['(0)', '(5)'], {}), '(0, 5)\n', (1329, 1335), False, 'import random\n'), ((1358, 1380), 'random.randrange', 'random.randrange', (['(0)', '(5)'], {}), '(0, 5)\n', (1374, 1380), False, 'import random\n'), ((1403, 1425), 'random.randrange', 'random.randrange', (['(0)', '(5)'], {}), '(0, 5)\n', (1419, 1425), False, 'import random\n')]
import pytest
import os
import openai
import argparse
import lancedb
import re
import pickle
import requests
import zipfile
from pathlib import Path
from main import get_document_title

from langchain.document_loaders import BSHTMLLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import LanceDB
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA

# TESTING ===============================================================


@pytest.fixture
def mock_embed(monkeypatch):
    def mock_embed_query(query, x):
        return [0.5, 0.5]

    monkeypatch.setattr(OpenAIEmbeddings, "embed_query", mock_embed_query)


def test_main(mock_embed):
    os.mkdir("./tmp")
    args = argparse.Namespace(query="test", openai_key="test")
    os.environ["OPENAI_API_KEY"] = "test"

    docs_path = Path("docs.pkl")
    docs = []

    pandas_docs = requests.get(
        "https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"
    )
    with open("./tmp/pandas.documentation.zip", "wb") as f:
        f.write(pandas_docs.content)

    file = zipfile.ZipFile("./tmp/pandas.documentation.zip")
    file.extractall(path="./tmp/pandas_docs")

    if not docs_path.exists():
        for p in Path("./tmp/pandas_docs/pandas.documentation").rglob("*.html"):
            print(p)
            if p.is_dir():
                continue
            loader = BSHTMLLoader(p, open_encoding="utf8")
            raw_document = loader.load()

            m = {}
            m["title"] = get_document_title(raw_document[0])
            m["version"] = "2.0rc0"
            raw_document[0].metadata = raw_document[0].metadata | m
            raw_document[0].metadata["source"] = str(raw_document[0].metadata["source"])
            docs = docs + raw_document

        with docs_path.open("wb") as fh:
            pickle.dump(docs, fh)
    else:
        with docs_path.open("rb") as fh:
            docs = pickle.load(fh)

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
    )
    documents = text_splitter.split_documents(docs)

    db = lancedb.connect("./tmp/lancedb")
    table = db.create_table(
        "pandas_docs",
        data=[
            {
                "vector": OpenAIEmbeddings().embed_query("Hello World"),
                "text": "Hello World",
                "id": "1",
            }
        ],
        mode="overwrite",
    )
    # docsearch = LanceDB.from_documents(documents, OpenAIEmbeddings, connection=table)
    # qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever())

    # result = qa.run(args.query)
    # print(result)
[ "lancedb.connect" ]
[((766, 783), 'os.mkdir', 'os.mkdir', (['"""./tmp"""'], {}), "('./tmp')\n", (774, 783), False, 'import os\n'), ((795, 846), 'argparse.Namespace', 'argparse.Namespace', ([], {'query': '"""test"""', 'openai_key': '"""test"""'}), "(query='test', openai_key='test')\n", (813, 846), False, 'import argparse\n'), ((906, 922), 'pathlib.Path', 'Path', (['"""docs.pkl"""'], {}), "('docs.pkl')\n", (910, 922), False, 'from pathlib import Path\n'), ((956, 1073), 'requests.get', 'requests.get', (['"""https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip"""'], {}), "(\n 'https://eto-public.s3.us-west-2.amazonaws.com/datasets/pandas_docs/pandas.documentation.zip'\n )\n", (968, 1073), False, 'import requests\n'), ((1187, 1236), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""./tmp/pandas.documentation.zip"""'], {}), "('./tmp/pandas.documentation.zip')\n", (1202, 1236), False, 'import zipfile\n'), ((2065, 2131), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), '(chunk_size=1000, chunk_overlap=200)\n', (2095, 2131), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2217, 2249), 'lancedb.connect', 'lancedb.connect', (['"""./tmp/lancedb"""'], {}), "('./tmp/lancedb')\n", (2232, 2249), False, 'import lancedb\n'), ((1490, 1527), 'langchain.document_loaders.BSHTMLLoader', 'BSHTMLLoader', (['p'], {'open_encoding': '"""utf8"""'}), "(p, open_encoding='utf8')\n", (1502, 1527), False, 'from langchain.document_loaders import BSHTMLLoader\n'), ((1614, 1649), 'main.get_document_title', 'get_document_title', (['raw_document[0]'], {}), '(raw_document[0])\n', (1632, 1649), False, 'from main import get_document_title\n'), ((1936, 1957), 'pickle.dump', 'pickle.dump', (['docs', 'fh'], {}), '(docs, fh)\n', (1947, 1957), False, 'import pickle\n'), ((2028, 2043), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2039, 2043), False, 'import pickle\n'), ((1332, 1378), 'pathlib.Path', 'Path', (['"""./tmp/pandas_docs/pandas.documentation"""'], {}), "('./tmp/pandas_docs/pandas.documentation')\n", (1336, 1378), False, 'from pathlib import Path\n'), ((2357, 2375), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (2373, 2375), False, 'from langchain.embeddings import OpenAIEmbeddings\n')]
""" AI Module This module provides an AI class that interfaces with language models to perform various tasks such as starting a conversation, advancing the conversation, and handling message serialization. It also includes backoff strategies for handling rate limit errors from the OpenAI API. Classes: AI: A class that interfaces with language models for conversation management and message serialization. Functions: serialize_messages(messages: List[Message]) -> str Serialize a list of messages to a JSON string. """ from __future__ import annotations import json import logging import os from pathlib import Path from typing import List, Optional, Union import backoff import openai import pyperclip from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.chat_models.base import BaseChatModel from langchain.schema import ( AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict, ) from langchain_anthropic import ChatAnthropic from langchain_openai import AzureChatOpenAI, ChatOpenAI from gpt_engineer.core.token_usage import TokenUsageLog # Type hint for a chat message Message = Union[AIMessage, HumanMessage, SystemMessage] # Set up logging logger = logging.getLogger(__name__) class AI: """ A class that interfaces with language models for conversation management and message serialization. This class provides methods to start and advance conversations, handle message serialization, and implement backoff strategies for rate limit errors when interacting with the OpenAI API. Attributes ---------- temperature : float The temperature setting for the language model. azure_endpoint : str The endpoint URL for the Azure-hosted language model. model_name : str The name of the language model to use. streaming : bool A flag indicating whether to use streaming for the language model. llm : BaseChatModel The language model instance for conversation management. token_usage_log : TokenUsageLog A log for tracking token usage during conversations. Methods ------- start(system: str, user: str, step_name: str) -> List[Message] Start the conversation with a system message and a user message. next(messages: List[Message], prompt: Optional[str], step_name: str) -> List[Message] Advances the conversation by sending message history to LLM and updating with the response. backoff_inference(messages: List[Message]) -> Any Perform inference using the language model with an exponential backoff strategy. serialize_messages(messages: List[Message]) -> str Serialize a list of messages to a JSON string. deserialize_messages(jsondictstr: str) -> List[Message] Deserialize a JSON string to a list of messages. _create_chat_model() -> BaseChatModel Create a chat model with the specified model name and temperature. """ def __init__( self, model_name="gpt-4-1106-preview", temperature=0.1, azure_endpoint="", streaming=True, ): """ Initialize the AI class. Parameters ---------- model_name : str, optional The name of the model to use, by default "gpt-4". temperature : float, optional The temperature to use for the model, by default 0.1. """ self.temperature = temperature self.azure_endpoint = azure_endpoint self.model_name = model_name self.streaming = streaming self.llm = self._create_chat_model() self.token_usage_log = TokenUsageLog(model_name) logger.debug(f"Using model {self.model_name}") def start(self, system: str, user: str, step_name: str) -> List[Message]: """ Start the conversation with a system message and a user message. 
Parameters ---------- system : str The content of the system message. user : str The content of the user message. step_name : str The name of the step. Returns ------- List[Message] The list of messages in the conversation. """ messages: List[Message] = [ SystemMessage(content=system), HumanMessage(content=user), ] return self.next(messages, step_name=step_name) def next( self, messages: List[Message], prompt: Optional[str] = None, *, step_name: str, ) -> List[Message]: """ Advances the conversation by sending message history to LLM and updating with the response. Parameters ---------- messages : List[Message] The list of messages in the conversation. prompt : Optional[str], optional The prompt to use, by default None. step_name : str The name of the step. Returns ------- List[Message] The updated list of messages in the conversation. """ if prompt: messages.append(HumanMessage(content=prompt)) logger.debug(f"Creating a new chat completion: {messages}") messages = self._collapse_messages(messages) response = self.backoff_inference(messages) self.token_usage_log.update_log( messages=messages, answer=response.content, step_name=step_name ) messages.append(response) logger.debug(f"Chat completion finished: {messages}") return messages def _collapse_messages(self, messages: List[Message]): """ Combine consecutive messages of the same type into a single message. This method iterates through the list of messages, combining consecutive messages of the same type by joining their content with a newline character. This reduces the number of messages and simplifies the conversation for processing. Parameters ---------- messages : List[Message] The list of messages to collapse. Returns ------- List[Message] The list of messages after collapsing consecutive messages of the same type. """ collapsed_messages = [] if not messages: return collapsed_messages previous_message = messages[0] combined_content = previous_message.content for current_message in messages[1:]: if current_message.type == previous_message.type: combined_content += "\n\n" + current_message.content else: collapsed_messages.append( previous_message.__class__(content=combined_content) ) previous_message = current_message combined_content = current_message.content collapsed_messages.append(previous_message.__class__(content=combined_content)) return collapsed_messages @backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45) def backoff_inference(self, messages): """ Perform inference using the language model while implementing an exponential backoff strategy. This function will retry the inference in case of a rate limit error from the OpenAI API. It uses an exponential backoff strategy, meaning the wait time between retries increases exponentially. The function will attempt to retry up to 7 times within a span of 45 seconds. Parameters ---------- messages : List[Message] A list of chat messages which will be passed to the language model for processing. callbacks : List[Callable] A list of callback functions that are triggered after each inference. These functions can be used for logging, monitoring, or other auxiliary tasks. Returns ------- Any The output from the language model after processing the provided messages. Raises ------ openai.error.RateLimitError If the number of retries exceeds the maximum or if the rate limit persists beyond the allotted time, the function will ultimately raise a RateLimitError. 
Example ------- >>> messages = [SystemMessage(content="Hello"), HumanMessage(content="How's the weather?")] >>> response = backoff_inference(messages) """ return self.llm.invoke(messages) # type: ignore @staticmethod def serialize_messages(messages: List[Message]) -> str: """ Serialize a list of messages to a JSON string. Parameters ---------- messages : List[Message] The list of messages to serialize. Returns ------- str The serialized messages as a JSON string. """ return json.dumps(messages_to_dict(messages)) @staticmethod def deserialize_messages(jsondictstr: str) -> List[Message]: """ Deserialize a JSON string to a list of messages. Parameters ---------- jsondictstr : str The JSON string to deserialize. Returns ------- List[Message] The deserialized list of messages. """ data = json.loads(jsondictstr) # Modify implicit is_chunk property to ALWAYS false # since Langchain's Message schema is stricter prevalidated_data = [ {**item, "tools": {**item.get("tools", {}), "is_chunk": False}} for item in data ] return list(messages_from_dict(prevalidated_data)) # type: ignore def _create_chat_model(self) -> BaseChatModel: """ Create a chat model with the specified model name and temperature. Parameters ---------- model : str The name of the model to create. temperature : float The temperature to use for the model. Returns ------- BaseChatModel The created chat model. """ if self.azure_endpoint: return AzureChatOpenAI( azure_endpoint=self.azure_endpoint, openai_api_version=os.getenv("OPENAI_API_VERSION", "2023-05-15"), deployment_name=self.model_name, openai_api_type="azure", streaming=self.streaming, callbacks=[StreamingStdOutCallbackHandler()], ) if "claude" in self.model_name: return ChatAnthropic( model=self.model_name, temperature=self.temperature, callbacks=[StreamingStdOutCallbackHandler()], max_tokens_to_sample=4096, ) return ChatOpenAI( model=self.model_name, temperature=self.temperature, streaming=self.streaming, callbacks=[StreamingStdOutCallbackHandler()], ) def serialize_messages(messages: List[Message]) -> str: return AI.serialize_messages(messages) class ClipboardAI(AI): # Ignore not init superclass def __init__(self, **_): # type: ignore pass @staticmethod def serialize_messages(messages: List[Message]) -> str: return "\n\n".join([f"{m.type}:\n{m.content}" for m in messages]) @staticmethod def multiline_input(): print("Enter/Paste your content. Ctrl-D or Ctrl-Z ( windows ) to save it.") content = [] while True: try: line = input() except EOFError: break content.append(line) return "\n".join(content) def next( self, messages: List[Message], prompt: Optional[str] = None, *, step_name: str, ) -> List[Message]: """ Not yet fully supported """ if prompt: messages.append(HumanMessage(content=prompt)) logger.debug(f"Creating a new chat completion: {messages}") msgs = self.serialize_messages(messages) pyperclip.copy(msgs) Path("clipboard.txt").write_text(msgs) print( "Messages copied to clipboard and written to clipboard.txt,", len(msgs), "characters in total", ) response = self.multiline_input() messages.append(AIMessage(content=response)) logger.debug(f"Chat completion finished: {messages}") return messages
[ "langchain.schema.AIMessage", "langchain.schema.messages_to_dict", "langchain.schema.HumanMessage", "langchain.schema.SystemMessage", "langchain.schema.messages_from_dict", "langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler" ]
[((1266, 1293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1283, 1293), False, 'import logging\n'), ((7101, 7188), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', 'openai.RateLimitError'], {'max_tries': '(7)', 'max_time': '(45)'}), '(backoff.expo, openai.RateLimitError, max_tries=7,\n max_time=45)\n', (7121, 7188), False, 'import backoff\n'), ((3698, 3723), 'gpt_engineer.core.token_usage.TokenUsageLog', 'TokenUsageLog', (['model_name'], {}), '(model_name)\n', (3711, 3723), False, 'from gpt_engineer.core.token_usage import TokenUsageLog\n'), ((9467, 9490), 'json.loads', 'json.loads', (['jsondictstr'], {}), '(jsondictstr)\n', (9477, 9490), False, 'import json\n'), ((12276, 12296), 'pyperclip.copy', 'pyperclip.copy', (['msgs'], {}), '(msgs)\n', (12290, 12296), False, 'import pyperclip\n'), ((4343, 4372), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system'}), '(content=system)\n', (4356, 4372), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((4386, 4412), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user'}), '(content=user)\n', (4398, 4412), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((9048, 9074), 'langchain.schema.messages_to_dict', 'messages_to_dict', (['messages'], {}), '(messages)\n', (9064, 9074), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((9771, 9808), 'langchain.schema.messages_from_dict', 'messages_from_dict', (['prevalidated_data'], {}), '(prevalidated_data)\n', (9789, 9808), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((12569, 12596), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'response'}), '(content=response)\n', (12578, 12596), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((5209, 5237), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (5221, 5237), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((12119, 12147), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'prompt'}), '(content=prompt)\n', (12131, 12147), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage, messages_from_dict, messages_to_dict\n'), ((12305, 12326), 'pathlib.Path', 'Path', (['"""clipboard.txt"""'], {}), "('clipboard.txt')\n", (12309, 12326), False, 'from pathlib import Path\n'), ((10405, 10450), 'os.getenv', 'os.getenv', (['"""OPENAI_API_VERSION"""', '"""2023-05-15"""'], {}), "('OPENAI_API_VERSION', '2023-05-15')\n", (10414, 10450), False, 'import os\n'), ((11105, 11137), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (11135, 11137), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((10611, 10643), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (10641, 10643), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((10847, 10879), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], 
{}), '()\n', (10877, 10879), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')]
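The row above round-trips chat history through langchain's message dictionaries. A minimal standalone sketch of that round-trip, assuming only the json module and the message classes the row already imports (it does not reuse the row's AI class or its "is_chunk" patching):

import json

from langchain.schema import HumanMessage, SystemMessage, messages_from_dict, messages_to_dict

# Build a tiny conversation, dump it to a JSON string, then rebuild the message objects.
history = [SystemMessage(content="You are terse."), HumanMessage(content="Hi")]
as_json = json.dumps(messages_to_dict(history))
restored = messages_from_dict(json.loads(as_json))
assert [m.content for m in restored] == [m.content for m in history]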
from fastapi import Body from sse_starlette.sse import EventSourceResponse from configs import LLM_MODELS, TEMPERATURE from server.utils import wrap_done, get_OpenAI from langchain.chains import LLMChain from langchain.callbacks import AsyncIteratorCallbackHandler from typing import AsyncIterable, Optional import asyncio from langchain.prompts import PromptTemplate from server.utils import get_prompt_template async def completion(query: str = Body(..., description="User input", examples=["恼羞成怒"]), stream: bool = Body(False, description="Stream the output"), echo: bool = Body(False, description="Echo the input in addition to the output"), model_name: str = Body(LLM_MODELS[0], description="Name of the LLM model."), temperature: float = Body(TEMPERATURE, description="LLM sampling temperature", ge=0.0, le=1.0), max_tokens: Optional[int] = Body(1024, description="Limit the number of tokens generated by the LLM; the default None means the model's maximum"), # top_p: float = Body(TOP_P, description="LLM nucleus sampling. Do not set together with temperature", gt=0.0, lt=1.0), prompt_name: str = Body("default", description="Name of the prompt template to use (configured in configs/prompt_config.py)"), ): #todo ApiModelWorker handles requests as chat by default and parses params["prompt"] into messages, so corresponding handling is needed when ApiModelWorker is used async def completion_iterator(query: str, model_name: str = LLM_MODELS[0], prompt_name: str = prompt_name, echo: bool = echo, ) -> AsyncIterable[str]: nonlocal max_tokens callback = AsyncIteratorCallbackHandler() if isinstance(max_tokens, int) and max_tokens <= 0: max_tokens = None model = get_OpenAI( model_name=model_name, temperature=temperature, max_tokens=max_tokens, callbacks=[callback], echo=echo ) prompt_template = get_prompt_template("completion", prompt_name) prompt = PromptTemplate.from_template(prompt_template) chain = LLMChain(prompt=prompt, llm=model) # Begin a task that runs in the background. task = asyncio.create_task(wrap_done( chain.acall({"input": query}), callback.done), ) if stream: async for token in callback.aiter(): # Use server-sent-events to stream the response yield token else: answer = "" async for token in callback.aiter(): answer += token yield answer await task return EventSourceResponse(completion_iterator(query=query, model_name=model_name, prompt_name=prompt_name), )
[ "langchain.callbacks.AsyncIteratorCallbackHandler", "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate.from_template" ]
[((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", (540, 567), False, 'from fastapi import Body\n'), ((603, 642), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""除了输出之外,还回显输入"""'}), "(False, description='除了输出之外,还回显输入')\n", (607, 642), False, 'from fastapi import Body\n'), ((683, 727), 'fastapi.Body', 'Body', (['LLM_MODELS[0]'], {'description': '"""LLM 模型名称。"""'}), "(LLM_MODELS[0], description='LLM 模型名称。')\n", (687, 727), False, 'from fastapi import Body\n'), ((771, 828), 'fastapi.Body', 'Body', (['TEMPERATURE'], {'description': '"""LLM 采样温度"""', 'ge': '(0.0)', 'le': '(1.0)'}), "(TEMPERATURE, description='LLM 采样温度', ge=0.0, le=1.0)\n", (775, 828), False, 'from fastapi import Body\n'), ((879, 933), 'fastapi.Body', 'Body', (['(1024)'], {'description': '"""限制LLM生成Token数量,默认None代表模型最大值"""'}), "(1024, description='限制LLM生成Token数量,默认None代表模型最大值')\n", (883, 933), False, 'from fastapi import Body\n'), ((1083, 1157), 'fastapi.Body', 'Body', (['"""default"""'], {'description': '"""使用的prompt模板名称(在configs/prompt_config.py中配置)"""'}), "('default', description='使用的prompt模板名称(在configs/prompt_config.py中配置)')\n", (1087, 1157), False, 'from fastapi import Body\n'), ((1664, 1694), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (1692, 1694), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((1802, 1921), 'server.utils.get_OpenAI', 'get_OpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'callbacks': '[callback]', 'echo': 'echo'}), '(model_name=model_name, temperature=temperature, max_tokens=\n max_tokens, callbacks=[callback], echo=echo)\n', (1812, 1921), False, 'from server.utils import wrap_done, get_OpenAI\n'), ((2014, 2060), 'server.utils.get_prompt_template', 'get_prompt_template', (['"""completion"""', 'prompt_name'], {}), "('completion', prompt_name)\n", (2033, 2060), False, 'from server.utils import get_prompt_template\n'), ((2078, 2123), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2106, 2123), False, 'from langchain.prompts import PromptTemplate\n'), ((2140, 2174), 'langchain.chains.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'model'}), '(prompt=prompt, llm=model)\n', (2148, 2174), False, 'from langchain.chains import LLMChain\n')]
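The endpoint above relies on AsyncIteratorCallbackHandler plus a background task to stream tokens before the chain call returns. Below is a rough sketch of that pattern with the FastAPI/SSE wrapper and the project-specific get_OpenAI/wrap_done helpers stripped out; the model, the trivial prompt, and the run_and_signal wrapper are placeholders of mine, and run_and_signal only approximates what wrap_done appears to do in the row:

import asyncio

from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

async def stream_tokens(text: str):
    callback = AsyncIteratorCallbackHandler()
    chain = LLMChain(
        llm=ChatOpenAI(streaming=True, callbacks=[callback]),
        prompt=PromptTemplate.from_template("{input}"),
    )

    async def run_and_signal():
        # Run the chain, then unblock callback.aiter() whatever happens.
        try:
            await chain.acall({"input": text})
        finally:
            callback.done.set()

    task = asyncio.create_task(run_and_signal())
    async for token in callback.aiter():
        yield token  # one streamed chunk at a time
    await task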
# — coding: utf-8 – import openai import json import logging import sys import argparse from langchain.chat_models import ChatOpenAI from langchain.prompts import ( ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate ) from langchain import LLMChain import numpy as np import requests import os import subprocess import re import importlib.util from sklearn.metrics.pairwise import cosine_similarity import pickle from util import * from tqdm import tqdm openai.api_key = os.environ["OPENAI_API_KEY"] def get_last_processed_index(progress_file): """Retrieve the last processed index from the progress file.""" if os.path.exists(progress_file): with open(progress_file, 'r', encoding='utf-8') as f: last_index = f.read().strip() return int(last_index) if last_index else 0 else: return 0 def update_progress(progress_file, index): """Update the last processed index in the progress file.""" with open(progress_file, 'w', encoding='utf-8') as f: f.write(str(index)) def task_decompose(question, Tool_dic, model_name): chat = ChatOpenAI(model_name=model_name) template = "You are a helpful assistant." system_message_prompt = SystemMessagePromptTemplate.from_template(template) human_message_prompt = HumanMessagePromptTemplate.from_template( "We have spotify database and the following tools:\n" "{Tool_dic}" "You need to decompose a complex user's question into some simple subtasks and let the model execute it step by step with these tools.\n" "Please note that: \n" "1. you should break down tasks into appropriate subtasks to use the tools mentioned above.\n" "2. You should not only list the subtask, but also list the ID of the tool used to solve this subtask.\n" "3. If you think you do not need to use the tool to solve the subtask, just leave it as {{\"ID\": -1}}\n" "4. You must consider the logical connections, order and constraints among the tools to achieve a correct tool path." "5. You must ONLY output the ID of the tool you chose in a parsible JSON format. 
Two examples output look like:\n" "'''\n" "Question: Pause the player" "Example 1: [{{\"Task\":\"Get information about the user’s current playback state\", \"ID\":15}}, {{\"Task\":\"Pause playback on the user's account\", \"ID\":19}}]\n" "'''\n" "This is the user's question: {question}\n" "Output:" ) chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt]) chain = LLMChain(llm=chat, prompt=chat_prompt) ind = 0 while True: try: result = chain.run(question=question, Tool_dic=Tool_dic) result = eval(result.split('\n\n')[0]) break except Exception as e: print(f"task decompose fails: {e}") if ind > 10: return -1 ind += 1 continue return result def task_execution( Tool_dic, dic_tool, test_data, progress_file, start_index, total_files, retrieval_num, ind, model_name): with tqdm(total=total_files, desc="Processing files", initial=start_index) as pbar: for i, data in enumerate(test_data[start_index:], start=start_index): question = data["query"] print(question) task_path = task_decompose(question, Tool_dic, model_name) tool_choice_ls = [] for task in task_path: if isinstance(task["ID"], list): for ele in task["ID"]: tool_choice_ls.append(dic_tool[ele]['tool_usage']) elif int(task["ID"]) in dic_tool.keys(): tool_choice_ls.append(dic_tool[task["ID"]]['tool_usage']) ind = ind + 1 with open(f"restbench_{model_name}_Easytool.jsonl", 'a+', encoding='utf-8') as f: line = json.dumps({ "ID": ind, "question": question, "task_path": task_path, "tool_choice_ls": tool_choice_ls }, ensure_ascii=False) f.write(line + '\n') print(tool_choice_ls) update_progress(progress_file, i + 1) pbar.update(1)
[ "langchain.prompts.SystemMessagePromptTemplate.from_template", "langchain.chat_models.ChatOpenAI", "langchain.prompts.ChatPromptTemplate.from_messages", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.LLMChain" ]
[((717, 746), 'os.path.exists', 'os.path.exists', (['progress_file'], {}), '(progress_file)\n', (731, 746), False, 'import os\n'), ((1210, 1243), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1220, 1243), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1320, 1371), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (1361, 1371), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1400, 2422), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""We have spotify database and the following tools:\n{Tool_dic}You need to decompose a complex user\'s question into some simple subtasks and let the model execute it step by step with these tools.\nPlease note that: \n1. you should break down tasks into appropriate subtasks to use the tools mentioned above.\n2. You should not only list the subtask, but also list the ID of the tool used to solve this subtask.\n3. If you think you do not need to use the tool to solve the subtask, just leave it as {{"ID": -1}}\n4. You must consider the logical connections, order and constraints among the tools to achieve a correct tool path.5. You must ONLY output the ID of the tool you chose in a parsible JSON format. Two examples output look like:\n\'\'\'\nQuestion: Pause the playerExample 1: [{{"Task":"Get information about the user’s current playback state", "ID":15}}, {{"Task":"Pause playback on the user\'s account", "ID":19}}]\n\'\'\'\nThis is the user\'s question: {question}\nOutput:"""'], {}), '(\n """We have spotify database and the following tools:\n{Tool_dic}You need to decompose a complex user\'s question into some simple subtasks and let the model execute it step by step with these tools.\nPlease note that: \n1. you should break down tasks into appropriate subtasks to use the tools mentioned above.\n2. You should not only list the subtask, but also list the ID of the tool used to solve this subtask.\n3. If you think you do not need to use the tool to solve the subtask, just leave it as {{"ID": -1}}\n4. You must consider the logical connections, order and constraints among the tools to achieve a correct tool path.5. You must ONLY output the ID of the tool you chose in a parsible JSON format. 
Two examples output look like:\n\'\'\'\nQuestion: Pause the playerExample 1: [{{"Task":"Get information about the user’s current playback state", "ID":15}}, {{"Task":"Pause playback on the user\'s account", "ID":19}}]\n\'\'\'\nThis is the user\'s question: {question}\nOutput:"""\n )\n', (1440, 2422), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2637, 2716), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (2669, 2716), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2730, 2768), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'chat', 'prompt': 'chat_prompt'}), '(llm=chat, prompt=chat_prompt)\n', (2738, 2768), False, 'from langchain import LLMChain\n'), ((3309, 3378), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_files', 'desc': '"""Processing files"""', 'initial': 'start_index'}), "(total=total_files, desc='Processing files', initial=start_index)\n", (3313, 3378), False, 'from tqdm import tqdm\n'), ((4128, 4255), 'json.dumps', 'json.dumps', (["{'ID': ind, 'question': question, 'task_path': task_path, 'tool_choice_ls':\n tool_choice_ls}"], {'ensure_ascii': '(False)'}), "({'ID': ind, 'question': question, 'task_path': task_path,\n 'tool_choice_ls': tool_choice_ls}, ensure_ascii=False)\n", (4138, 4255), False, 'import json\n')]
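task_decompose above wires a system template and a human template into a ChatPromptTemplate, runs it through LLMChain, and then eval()s the reply. A cut-down sketch of the same prompt plumbing with a toy template; the model name and question are placeholders:

from langchain import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

chat = ChatOpenAI(model_name="gpt-3.5-turbo")  # placeholder model name
system_msg = SystemMessagePromptTemplate.from_template("You are a helpful assistant.")
human_msg = HumanMessagePromptTemplate.from_template("List the subtasks for: {question}")
chain = LLMChain(llm=chat, prompt=ChatPromptTemplate.from_messages([system_msg, human_msg]))
print(chain.run(question="Pause the player"))

If the prompt can be trusted to emit valid JSON, json.loads on the reply is a stricter alternative to the eval() call the row uses.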
from langchain.llms import Ollama question = input("What is your question?") llm = Ollama(model="llama2") res = llm.predict(question) print(res)
[ "langchain.llms.Ollama" ]
[((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')]
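Same snippet, one statement per line; on recent langchain releases .invoke() is the non-deprecated spelling of .predict() (exact version behavior is an assumption here), and a local Ollama server with the llama2 model pulled is assumed:

from langchain.llms import Ollama

llm = Ollama(model="llama2")
question = input("What is your question? ")
print(llm.invoke(question))  # llm.predict(question) on older releases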
import os from pathlib import Path from typing import Union import cloudpickle import yaml from mlflow.exceptions import MlflowException from mlflow.langchain.utils import ( _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types, ) _STEPS_FOLDER_NAME = "steps" _RUNNABLE_STEPS_FILE_NAME = "steps.yaml" _BRANCHES_FOLDER_NAME = "branches" _MAPPER_FOLDER_NAME = "mapper" _RUNNABLE_BRANCHES_FILE_NAME = "branches.yaml" _DEFAULT_BRANCH_NAME = "default" def _load_model_from_config(path, model_config): from langchain.chains.loading import type_to_loader_dict as chains_type_to_loader_dict from langchain.llms import get_type_to_cls_dict as llms_get_type_to_cls_dict try: from langchain.prompts.loading import type_to_loader_dict as prompts_types except ImportError: prompts_types = {"prompt", "few_shot_prompt"} config_path = os.path.join(path, model_config.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME)) # Load runnables from config file if config_path.endswith(".yaml"): config = _load_from_yaml(config_path) elif config_path.endswith(".json"): config = _load_from_json(config_path) else: raise MlflowException( f"Cannot load runnable without a config file. Got path {config_path}." ) _type = config.get("_type") if _type in chains_type_to_loader_dict: from langchain.chains.loading import load_chain return load_chain(config_path) elif _type in prompts_types: from langchain.prompts.loading import load_prompt return load_prompt(config_path) elif _type in llms_get_type_to_cls_dict(): from langchain.llms.loading import load_llm return load_llm(config_path) elif _type in custom_type_to_loader_dict(): return custom_type_to_loader_dict()[_type](config) raise MlflowException(f"Unsupported type {_type} for loading.") def _load_model_from_path(path: str, model_config=None): model_load_fn = model_config.get(_MODEL_LOAD_KEY) if model_load_fn == _RUNNABLE_LOAD_KEY: return _load_runnables(path, model_config) if model_load_fn == _BASE_LOAD_KEY: return _load_base_lcs(path, model_config) if model_load_fn == _CONFIG_LOAD_KEY: return _load_model_from_config(path, model_config) raise MlflowException(f"Unsupported model load key {model_load_fn}") def _load_runnable_with_steps(file_path: Union[Path, str], model_type: str): """Load the model Args: file_path: Path to file to load the model from. model_type: Type of the model to load. """ from langchain.schema.runnable import RunnableParallel, RunnableSequence # Convert file to Path object. load_path = Path(file_path) if not load_path.exists() or not load_path.is_dir(): raise MlflowException( f"File {load_path} must exist and must be a directory " "in order to load runnable with steps." ) steps_conf_file = load_path / _RUNNABLE_STEPS_FILE_NAME if not steps_conf_file.exists(): raise MlflowException( f"File {steps_conf_file} must exist in order to load runnable with steps." ) steps_conf = _load_from_yaml(steps_conf_file) steps_path = load_path / _STEPS_FOLDER_NAME if not steps_path.exists() or not steps_path.is_dir(): raise MlflowException( f"Folder {steps_path} must exist and must be a directory " "in order to load runnable with steps." 
) steps = {} # ignore hidden files for step in (f for f in os.listdir(steps_path) if not f.startswith(".")): config = steps_conf.get(step) # load model from the folder of the step runnable = _load_model_from_path(os.path.join(steps_path, step), config) steps[step] = runnable if model_type == RunnableSequence.__name__: steps = [value for _, value in sorted(steps.items(), key=lambda item: int(item[0]))] return runnable_sequence_from_steps(steps) if model_type == RunnableParallel.__name__: return RunnableParallel(steps) def runnable_sequence_from_steps(steps): """Construct a RunnableSequence from steps. Args: steps: List of steps to construct the RunnableSequence from. """ from langchain.schema.runnable import RunnableSequence if len(steps) < 2: raise ValueError(f"RunnableSequence must have at least 2 steps, got {len(steps)}.") first, *middle, last = steps return RunnableSequence(first=first, middle=middle, last=last) def _load_runnable_branch(file_path: Union[Path, str]): """Load the model Args: file_path: Path to file to load the model from. """ from langchain.schema.runnable import RunnableBranch # Convert file to Path object. load_path = Path(file_path) if not load_path.exists() or not load_path.is_dir(): raise MlflowException( f"File {load_path} must exist and must be a directory " "in order to load runnable with steps." ) branches_conf_file = load_path / _RUNNABLE_BRANCHES_FILE_NAME if not branches_conf_file.exists(): raise MlflowException( f"File {branches_conf_file} must exist in order to load runnable with steps." ) branches_conf = _load_from_yaml(branches_conf_file) branches_path = load_path / _BRANCHES_FOLDER_NAME if not branches_path.exists() or not branches_path.is_dir(): raise MlflowException( f"Folder {branches_path} must exist and must be a directory " "in order to load runnable with steps." ) branches = [] for branch in os.listdir(branches_path): # load model from the folder of the branch if branch == _DEFAULT_BRANCH_NAME: default_branch_path = branches_path / _DEFAULT_BRANCH_NAME default = _load_model_from_path( default_branch_path, branches_conf.get(_DEFAULT_BRANCH_NAME) ) else: branch_tuple = [] for i in range(2): config = branches_conf.get(f"{branch}-{i}") runnable = _load_model_from_path( os.path.join(branches_path, branch, str(i)), config ) branch_tuple.append(runnable) branches.append(tuple(branch_tuple)) # default branch must be the last branch branches.append(default) return RunnableBranch(*branches) def _load_runnable_assign(file_path: Union[Path, str]): """Load the model Args: file_path: Path to file to load the model from. """ from langchain.schema.runnable.passthrough import RunnableAssign # Convert file to Path object. load_path = Path(file_path) if not load_path.exists() or not load_path.is_dir(): raise MlflowException( f"File {load_path} must exist and must be a directory in order to load runnable." ) mapper_file = load_path / _MAPPER_FOLDER_NAME if not mapper_file.exists() or not mapper_file.is_dir(): raise MlflowException( f"Folder {mapper_file} must exist and must be a directory " "in order to load runnable assign with mapper." 
) mapper = _load_runnable_with_steps(mapper_file, "RunnableParallel") return RunnableAssign(mapper) def _save_internal_runnables(runnable, path, loader_fn, persist_dir): conf = {} if isinstance(runnable, lc_runnables_types()): conf[_MODEL_TYPE_KEY] = runnable.__class__.__name__ conf.update(_save_runnables(runnable, path, loader_fn, persist_dir)) elif isinstance(runnable, base_lc_types()): lc_model = _validate_and_wrap_lc_model(runnable, loader_fn) conf[_MODEL_TYPE_KEY] = lc_model.__class__.__name__ conf.update(_save_base_lcs(lc_model, path, loader_fn, persist_dir)) else: conf = { _MODEL_TYPE_KEY: runnable.__class__.__name__, _MODEL_DATA_KEY: _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY: _CONFIG_LOAD_KEY, } path = path / _MODEL_DATA_YAML_FILE_NAME # Save some simple runnables that langchain natively supports. if hasattr(runnable, "save"): runnable.save(path) # TODO: check if `dict` is enough to load it back elif hasattr(runnable, "dict"): runnable_dict = runnable.dict() with open(path, "w") as f: yaml.dump(runnable_dict, f, default_flow_style=False) else: return return conf def _save_runnable_with_steps(model, file_path: Union[Path, str], loader_fn=None, persist_dir=None): """Save the model with steps. Currently it supports saving RunnableSequence and RunnableParallel. If saving a RunnableSequence, steps is a list of Runnable objects. We save each step to the subfolder named by the step index. e.g. - model - steps - 0 - model.yaml - 1 - model.pkl - steps.yaml If saving a RunnableParallel, steps is a dictionary of key-Runnable pairs. We save each step to the subfolder named by the key. e.g. - model - steps - context - model.yaml - question - model.pkl - steps.yaml We save steps.yaml file to the model folder. It contains each step's model's configuration. Args: model: Runnable to be saved. file_path: Path to file to save the model to. """ # Convert file to Path object. save_path = Path(file_path) save_path.mkdir(parents=True, exist_ok=True) # Save steps into a folder steps_path = save_path / _STEPS_FOLDER_NAME steps_path.mkdir() steps = model.steps if isinstance(steps, list): generator = enumerate(steps) elif isinstance(steps, dict): generator = steps.items() else: raise MlflowException( f"Runnable {model} steps attribute must be either a list or a dictionary. " f"Got {type(steps).__name__}." ) unsaved_runnables = {} steps_conf = {} for key, runnable in generator: step = str(key) # Save each step into a subfolder named by step save_runnable_path = steps_path / step save_runnable_path.mkdir() if result := _save_internal_runnables(runnable, save_runnable_path, loader_fn, persist_dir): steps_conf[step] = result else: unsaved_runnables[step] = str(runnable) if unsaved_runnables: raise MlflowException( f"Failed to save runnable sequence: {unsaved_runnables}. " "Runnable must have either `save` or `dict` method." ) # save steps configs with save_path.joinpath(_RUNNABLE_STEPS_FILE_NAME).open("w") as f: yaml.dump(steps_conf, f, default_flow_style=False) def _save_runnable_branch(model, file_path, loader_fn, persist_dir): """ Save runnable branch in to path. 
""" save_path = Path(file_path) save_path.mkdir(parents=True, exist_ok=True) # save branches into a folder branches_path = save_path / _BRANCHES_FOLDER_NAME branches_path.mkdir() unsaved_runnables = {} branches_conf = {} for index, branch_tuple in enumerate(model.branches): # Save each branch into a subfolder named by index # and save condition and runnable into subfolder for i, runnable in enumerate(branch_tuple): save_runnable_path = branches_path / str(index) / str(i) save_runnable_path.mkdir(parents=True) branches_conf[f"{index}-{i}"] = {} if result := _save_internal_runnables( runnable, save_runnable_path, loader_fn, persist_dir ): branches_conf[f"{index}-{i}"] = result else: unsaved_runnables[f"{index}-{i}"] = str(runnable) # save default branch default_branch_path = branches_path / _DEFAULT_BRANCH_NAME default_branch_path.mkdir() if result := _save_internal_runnables( model.default, default_branch_path, loader_fn, persist_dir ): branches_conf[_DEFAULT_BRANCH_NAME] = result else: unsaved_runnables[_DEFAULT_BRANCH_NAME] = str(model.default) if unsaved_runnables: raise MlflowException( f"Failed to save runnable branch: {unsaved_runnables}. " "Runnable must have either `save` or `dict` method." ) # save branches configs with save_path.joinpath(_RUNNABLE_BRANCHES_FILE_NAME).open("w") as f: yaml.dump(branches_conf, f, default_flow_style=False) def _save_runnable_assign(model, file_path, loader_fn=None, persist_dir=None): from langchain.schema.runnable import RunnableParallel save_path = Path(file_path) save_path.mkdir(parents=True, exist_ok=True) # save mapper into a folder mapper_path = save_path / _MAPPER_FOLDER_NAME mapper_path.mkdir() if not isinstance(model.mapper, RunnableParallel): raise MlflowException( f"Failed to save model {model} with type {model.__class__.__name__}. " "RunnableAssign's mapper must be a RunnableParallel." 
) _save_runnable_with_steps(model.mapper, mapper_path, loader_fn, persist_dir) def _save_picklable_runnable(model, path): if not path.endswith(".pkl"): raise ValueError(f"File path must end with .pkl, got {path}.") with open(path, "wb") as f: cloudpickle.dump(model, f) def _save_runnables(model, path, loader_fn=None, persist_dir=None): model_data_kwargs = {_MODEL_LOAD_KEY: _RUNNABLE_LOAD_KEY} if isinstance(model, lc_runnable_with_steps_types()): model_data_path = _MODEL_DATA_FOLDER_NAME _save_runnable_with_steps( model, os.path.join(path, model_data_path), loader_fn, persist_dir ) elif isinstance(model, picklable_runnable_types()): model_data_path = _MODEL_DATA_PKL_FILE_NAME _save_picklable_runnable(model, os.path.join(path, model_data_path)) elif isinstance(model, lc_runnable_branch_types()): model_data_path = _MODEL_DATA_FOLDER_NAME _save_runnable_branch(model, os.path.join(path, model_data_path), loader_fn, persist_dir) elif isinstance(model, lc_runnable_assign_types()): model_data_path = _MODEL_DATA_FOLDER_NAME _save_runnable_assign(model, os.path.join(path, model_data_path), loader_fn, persist_dir) else: raise MlflowException.invalid_parameter_value( _UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(model).__name__) ) model_data_kwargs.update({_MODEL_DATA_KEY: model_data_path}) return model_data_kwargs def _load_runnables(path, conf): model_type = conf.get(_MODEL_TYPE_KEY) model_data = conf.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME) if model_type in (x.__name__ for x in lc_runnable_with_steps_types()): return _load_runnable_with_steps(os.path.join(path, model_data), model_type) if ( model_type in (x.__name__ for x in picklable_runnable_types()) or model_data == _MODEL_DATA_PKL_FILE_NAME ): return _load_from_pickle(os.path.join(path, model_data)) if model_type in (x.__name__ for x in lc_runnable_branch_types()): return _load_runnable_branch(os.path.join(path, model_data)) if model_type in (x.__name__ for x in lc_runnable_assign_types()): return _load_runnable_assign(os.path.join(path, model_data)) raise MlflowException.invalid_parameter_value( _UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=model_type) )
[ "langchain.llms.get_type_to_cls_dict", "langchain.schema.runnable.passthrough.RunnableAssign", "langchain.chains.loading.load_chain", "langchain.schema.runnable.RunnableParallel", "langchain.schema.runnable.RunnableSequence", "langchain.llms.loading.load_llm", "langchain.schema.runnable.RunnableBranch", "langchain.prompts.loading.load_prompt" ]
[((2386, 2443), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Unsupported type {_type} for loading."""'], {}), "(f'Unsupported type {_type} for loading.')\n", (2401, 2443), False, 'from mlflow.exceptions import MlflowException\n'), ((2853, 2915), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Unsupported model load key {model_load_fn}"""'], {}), "(f'Unsupported model load key {model_load_fn}')\n", (2868, 2915), False, 'from mlflow.exceptions import MlflowException\n'), ((3268, 3283), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (3272, 3283), False, 'from pathlib import Path\n'), ((3745, 3777), 'mlflow.langchain.utils._load_from_yaml', '_load_from_yaml', (['steps_conf_file'], {}), '(steps_conf_file)\n', (3760, 3777), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((5047, 5102), 'langchain.schema.runnable.RunnableSequence', 'RunnableSequence', ([], {'first': 'first', 'middle': 'middle', 'last': 'last'}), '(first=first, middle=middle, last=last)\n', (5063, 5102), False, 'from langchain.schema.runnable import RunnableSequence\n'), ((5367, 5382), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (5371, 5382), False, 'from pathlib import Path\n'), ((5859, 5894), 'mlflow.langchain.utils._load_from_yaml', '_load_from_yaml', (['branches_conf_file'], {}), '(branches_conf_file)\n', (5874, 5894), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((6218, 6243), 'os.listdir', 'os.listdir', (['branches_path'], {}), '(branches_path)\n', (6228, 6243), False, 'import os\n'), ((7003, 7028), 'langchain.schema.runnable.RunnableBranch', 'RunnableBranch', (['*branches'], {}), '(*branches)\n', (7017, 7028), False, 'from langchain.schema.runnable import RunnableBranch\n'), ((7305, 7320), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (7309, 7320), False, 'from pathlib import Path\n'), ((7881, 7903), 'langchain.schema.runnable.passthrough.RunnableAssign', 'RunnableAssign', (['mapper'], {}), '(mapper)\n', (7895, 7903), False, 'from langchain.schema.runnable.passthrough import RunnableAssign\n'), ((10183, 10198), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (10187, 10198), False, 'from pathlib import Path\n'), ((11645, 11660), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11649, 11660), False, 'from pathlib import Path\n'), ((13438, 13453), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (13442, 13453), False, 'from pathlib import Path\n'), ((1579, 1607), 'mlflow.langchain.utils._load_from_yaml', 
'_load_from_yaml', (['config_path'], {}), '(config_path)\n', (1594, 1607), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((1976, 1999), 'langchain.chains.loading.load_chain', 'load_chain', (['config_path'], {}), '(config_path)\n', (1986, 1999), False, 'from langchain.chains.loading import load_chain\n'), ((2707, 2741), 'mlflow.langchain.utils._load_base_lcs', '_load_base_lcs', (['path', 'model_config'], {}), '(path, model_config)\n', (2721, 2741), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((3355, 3474), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {load_path} must exist and must be a directory in order to load runnable with steps."""'], {}), "(\n f'File {load_path} must exist and must be a directory in order to load runnable with steps.'\n )\n", (3370, 3474), False, 'from mlflow.exceptions import MlflowException\n'), ((3614, 3710), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {steps_conf_file} must exist in order to load runnable with steps."""'], {}), "(\n f'File {steps_conf_file} must exist in order to load runnable with steps.')\n", (3629, 3710), False, 'from mlflow.exceptions import MlflowException\n'), ((3899, 4021), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Folder {steps_path} must exist and must be a directory in order to load runnable with steps."""'], {}), "(\n f'Folder {steps_path} must exist and must be a directory in order to load runnable with steps.'\n )\n", (3914, 4021), False, 'from mlflow.exceptions import MlflowException\n'), ((4624, 4647), 'langchain.schema.runnable.RunnableParallel', 'RunnableParallel', (['steps'], {}), '(steps)\n', (4640, 4647), False, 'from langchain.schema.runnable import RunnableParallel\n'), ((5454, 5573), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {load_path} must exist and must be a directory in order to load runnable with steps."""'], {}), "(\n f'File {load_path} must exist and must be a directory in order to load runnable with steps.'\n )\n", (5469, 5573), False, 'from mlflow.exceptions import MlflowException\n'), ((5722, 5826), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {branches_conf_file} must exist in order to load runnable with steps."""'], {}), "(\n f'File {branches_conf_file} must exist in order to load runnable with steps.'\n )\n", (5737, 5826), False, 'from mlflow.exceptions import MlflowException\n'), ((6028, 6153), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Folder {branches_path} must exist and must be a 
directory in order to load runnable with steps."""'], {}), "(\n f'Folder {branches_path} must exist and must be a directory in order to load runnable with steps.'\n )\n", (6043, 6153), False, 'from mlflow.exceptions import MlflowException\n'), ((7392, 7500), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""File {load_path} must exist and must be a directory in order to load runnable."""'], {}), "(\n f'File {load_path} must exist and must be a directory in order to load runnable.'\n )\n", (7407, 7500), False, 'from mlflow.exceptions import MlflowException\n'), ((7639, 7770), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Folder {mapper_file} must exist and must be a directory in order to load runnable assign with mapper."""'], {}), "(\n f'Folder {mapper_file} must exist and must be a directory in order to load runnable assign with mapper.'\n )\n", (7654, 7770), False, 'from mlflow.exceptions import MlflowException\n'), ((8018, 8038), 'mlflow.langchain.utils.lc_runnables_types', 'lc_runnables_types', ([], {}), '()\n', (8036, 8038), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((11186, 11321), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Failed to save runnable sequence: {unsaved_runnables}. Runnable must have either `save` or `dict` method."""'], {}), "(\n f'Failed to save runnable sequence: {unsaved_runnables}. Runnable must have either `save` or `dict` method.'\n )\n", (11201, 11321), False, 'from mlflow.exceptions import MlflowException\n'), ((11454, 11504), 'yaml.dump', 'yaml.dump', (['steps_conf', 'f'], {'default_flow_style': '(False)'}), '(steps_conf, f, default_flow_style=False)\n', (11463, 11504), False, 'import yaml\n'), ((12955, 13088), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Failed to save runnable branch: {unsaved_runnables}. Runnable must have either `save` or `dict` method."""'], {}), "(\n f'Failed to save runnable branch: {unsaved_runnables}. Runnable must have either `save` or `dict` method.'\n )\n", (12970, 13088), False, 'from mlflow.exceptions import MlflowException\n'), ((13227, 13280), 'yaml.dump', 'yaml.dump', (['branches_conf', 'f'], {'default_flow_style': '(False)'}), '(branches_conf, f, default_flow_style=False)\n', (13236, 13280), False, 'import yaml\n'), ((13679, 13827), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Failed to save model {model} with type {model.__class__.__name__}. RunnableAssign\'s mapper must be a RunnableParallel."""'], {}), '(\n f"Failed to save model {model} with type {model.__class__.__name__}. 
RunnableAssign\'s mapper must be a RunnableParallel."\n )\n', (13694, 13827), False, 'from mlflow.exceptions import MlflowException\n'), ((14126, 14152), 'cloudpickle.dump', 'cloudpickle.dump', (['model', 'f'], {}), '(model, f)\n', (14142, 14152), False, 'import cloudpickle\n'), ((14310, 14340), 'mlflow.langchain.utils.lc_runnable_with_steps_types', 'lc_runnable_with_steps_types', ([], {}), '()\n', (14338, 14340), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((16218, 16283), 'mlflow.langchain.utils._UNSUPPORTED_MODEL_ERROR_MESSAGE.format', '_UNSUPPORTED_MODEL_ERROR_MESSAGE.format', ([], {'instance_type': 'model_type'}), '(instance_type=model_type)\n', (16257, 16283), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((1665, 1693), 'mlflow.langchain.utils._load_from_json', '_load_from_json', (['config_path'], {}), '(config_path)\n', (1680, 1693), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((1718, 1810), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Cannot load runnable without a config file. Got path {config_path}."""'], {}), "(\n f'Cannot load runnable without a config file. 
Got path {config_path}.')\n", (1733, 1810), False, 'from mlflow.exceptions import MlflowException\n'), ((2107, 2131), 'langchain.prompts.loading.load_prompt', 'load_prompt', (['config_path'], {}), '(config_path)\n', (2118, 2131), False, 'from langchain.prompts.loading import load_prompt\n'), ((4119, 4141), 'os.listdir', 'os.listdir', (['steps_path'], {}), '(steps_path)\n', (4129, 4141), False, 'import os\n'), ((4297, 4327), 'os.path.join', 'os.path.join', (['steps_path', 'step'], {}), '(steps_path, step)\n', (4309, 4327), False, 'import os\n'), ((8208, 8223), 'mlflow.langchain.utils.base_lc_types', 'base_lc_types', ([], {}), '()\n', (8221, 8223), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((8245, 8293), 'mlflow.langchain.utils._validate_and_wrap_lc_model', '_validate_and_wrap_lc_model', (['runnable', 'loader_fn'], {}), '(runnable, loader_fn)\n', (8272, 8293), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((14447, 14482), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (14459, 14482), False, 'import os\n'), ((14544, 14570), 'mlflow.langchain.utils.picklable_runnable_types', 'picklable_runnable_types', ([], {}), '()\n', (14568, 14570), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15632, 15662), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (15644, 15662), False, 'import os\n'), ((15847, 15877), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (15859, 15877), False, 'import os\n'), ((15987, 16017), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (15999, 16017), False, 'import os\n'), ((16127, 16157), 'os.path.join', 'os.path.join', (['path', 'model_data'], {}), '(path, model_data)\n', (16139, 16157), False, 'import os\n'), ((2150, 2177), 'langchain.llms.get_type_to_cls_dict', 'llms_get_type_to_cls_dict', ([], {}), '()\n', (2175, 2177), True, 'from langchain.llms import get_type_to_cls_dict as llms_get_type_to_cls_dict\n'), ((2247, 2268), 
'langchain.llms.loading.load_llm', 'load_llm', (['config_path'], {}), '(config_path)\n', (2255, 2268), False, 'from langchain.llms.loading import load_llm\n'), ((8374, 8428), 'mlflow.langchain.utils._save_base_lcs', '_save_base_lcs', (['lc_model', 'path', 'loader_fn', 'persist_dir'], {}), '(lc_model, path, loader_fn, persist_dir)\n', (8388, 8428), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((14665, 14700), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (14677, 14700), False, 'import os\n'), ((14729, 14755), 'mlflow.langchain.utils.lc_runnable_branch_types', 'lc_runnable_branch_types', ([], {}), '()\n', (14753, 14755), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15558, 15588), 'mlflow.langchain.utils.lc_runnable_with_steps_types', 'lc_runnable_with_steps_types', ([], {}), '()\n', (15586, 15588), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15921, 15947), 'mlflow.langchain.utils.lc_runnable_branch_types', 'lc_runnable_branch_types', ([], {}), '()\n', (15945, 15947), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((16061, 16087), 'mlflow.langchain.utils.lc_runnable_assign_types', 'lc_runnable_assign_types', ([], {}), '()\n', (16085, 16087), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, 
_load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((2287, 2315), 'mlflow.langchain.utils.custom_type_to_loader_dict', 'custom_type_to_loader_dict', ([], {}), '()\n', (2313, 2315), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((14845, 14880), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (14857, 14880), False, 'import os\n'), ((14933, 14959), 'mlflow.langchain.utils.lc_runnable_assign_types', 'lc_runnable_assign_types', ([], {}), '()\n', (14957, 14959), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((15728, 15754), 'mlflow.langchain.utils.picklable_runnable_types', 'picklable_runnable_types', ([], {}), '()\n', (15752, 15754), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n'), ((9016, 9069), 'yaml.dump', 'yaml.dump', (['runnable_dict', 'f'], {'default_flow_style': '(False)'}), '(runnable_dict, f, default_flow_style=False)\n', (9025, 9069), False, 'import yaml\n'), ((15049, 15084), 'os.path.join', 'os.path.join', (['path', 'model_data_path'], {}), '(path, model_data_path)\n', (15061, 15084), False, 'import os\n'), ((2332, 2360), 'mlflow.langchain.utils.custom_type_to_loader_dict', 'custom_type_to_loader_dict', ([], {}), '()\n', (2358, 2360), False, 'from mlflow.langchain.utils import _BASE_LOAD_KEY, _CONFIG_LOAD_KEY, _MODEL_DATA_FOLDER_NAME, _MODEL_DATA_KEY, _MODEL_DATA_PKL_FILE_NAME, _MODEL_DATA_YAML_FILE_NAME, _MODEL_LOAD_KEY, _MODEL_TYPE_KEY, _RUNNABLE_LOAD_KEY, _UNSUPPORTED_MODEL_ERROR_MESSAGE, _load_base_lcs, _load_from_json, _load_from_pickle, _load_from_yaml, _save_base_lcs, _validate_and_wrap_lc_model, base_lc_types, custom_type_to_loader_dict, lc_runnable_assign_types, lc_runnable_branch_types, lc_runnable_with_steps_types, lc_runnables_types, picklable_runnable_types\n')]
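The save/load helpers above walk the steps of composed runnables and give each step its own sub-folder (steps/0, steps/1, ..., described by a steps.yaml), as the _save_runnable_with_steps docstring lays out. A tiny sketch of the kind of object they operate on, using only langchain's LCEL composition; the lambdas are arbitrary examples, and no claim is made that this exact chain serializes cleanly:

from langchain.schema.runnable import RunnableLambda

add_one = RunnableLambda(lambda x: x + 1)
double = RunnableLambda(lambda x: x * 2)
chain = add_one | double  # composing with | yields a RunnableSequence

print(type(chain).__name__)  # RunnableSequence
print(chain.invoke(3))  # (3 + 1) * 2 == 8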
import json from langchain.schema import OutputParserException def parse_json_markdown(json_string: str) -> dict: # Remove the triple backticks if present json_string = json_string.strip() start_index = json_string.find("```json") end_index = json_string.find("```", start_index + len("```json")) if start_index != -1 and end_index != -1: extracted_content = json_string[start_index + len("```json"):end_index].strip() # Parse the JSON string into a Python dictionary parsed = json.loads(extracted_content) elif start_index != -1 and end_index == -1 and json_string.endswith("``"): end_index = json_string.find("``", start_index + len("```json")) extracted_content = json_string[start_index + len("```json"):end_index].strip() # Parse the JSON string into a Python dictionary parsed = json.loads(extracted_content) elif json_string.startswith("{"): # Parse the JSON string into a Python dictionary parsed = json.loads(json_string) else: raise Exception("Could not find JSON block in the output.") return parsed def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict: try: json_obj = parse_json_markdown(text) except json.JSONDecodeError as e: raise OutputParserException(f"Got invalid JSON object. Error: {e}") for key in expected_keys: if key not in json_obj: raise OutputParserException( f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}" ) return json_obj
[ "langchain.schema.OutputParserException" ]
[((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid JSON object. Error: {e}"""'], {}), "(f'Got invalid JSON object. Error: {e}')\n", (1343, 1383), False, 'from langchain.schema import OutputParserException\n'), ((1464, 1581), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"""'], {}), "(\n f'Got invalid return object. Expected key `{key}` to be present, but got {json_obj}'\n )\n", (1485, 1581), False, 'from langchain.schema import OutputParserException\n'), ((1013, 1036), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1023, 1036), False, 'import json\n')]
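A quick usage sketch for the two helpers above; the sample payload is made up:

text = '```json\n{"action": "search", "query": "weather"}\n```'

parsed = parse_json_markdown(text)
assert parsed == {"action": "search", "query": "weather"}

# parse_and_check_json_markdown raises OutputParserException if a listed key is missing.
checked = parse_and_check_json_markdown(text, ["action", "query"])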
import os import uuid from typing import Any, Dict, List, Optional, Tuple from langchain.agents.agent import RunnableAgent from langchain.agents.tools import tool as LangChainTool from langchain.memory import ConversationSummaryMemory from langchain.tools.render import render_text_description from langchain_core.agents import AgentAction from langchain_core.callbacks import BaseCallbackHandler from langchain_openai import ChatOpenAI from pydantic import ( UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator, ) from pydantic_core import PydanticCustomError from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler from crewai.utilities import I18N, Logger, Prompts, RPMController from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess class Agent(BaseModel): """Represents an agent in a system. Each agent has a role, a goal, a backstory, and an optional language model (llm). The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents. Attributes: agent_executor: An instance of the CrewAgentExecutor class. role: The role of the agent. goal: The objective of the agent. backstory: The backstory of the agent. config: Dict representation of agent configuration. llm: The language model that will run the agent. function_calling_llm: The language model that will handle the tool calling for this agent; it overrides the crew function_calling_llm. max_iter: Maximum number of iterations for an agent to execute a task. memory: Whether the agent should have memory or not. max_rpm: Maximum number of requests per minute for the agent execution to be respected. verbose: Whether the agent execution should be in verbose mode. allow_delegation: Whether the agent is allowed to delegate tasks to other agents. tools: Tools at the agent's disposal step_callback: Callback to be executed after each step of the agent execution.
callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process """ __hash__ = object.__hash__ # type: ignore _logger: Logger = PrivateAttr() _rpm_controller: RPMController = PrivateAttr(default=None) _request_within_rpm_limit: Any = PrivateAttr(default=None) _token_process: TokenProcess = TokenProcess() formatting_errors: int = 0 model_config = ConfigDict(arbitrary_types_allowed=True) id: UUID4 = Field( default_factory=uuid.uuid4, frozen=True, description="Unique identifier for the object, not set by user.", ) role: str = Field(description="Role of the agent") goal: str = Field(description="Objective of the agent") backstory: str = Field(description="Backstory of the agent") config: Optional[Dict[str, Any]] = Field( description="Configuration for the agent", default=None, ) max_rpm: Optional[int] = Field( default=None, description="Maximum number of requests per minute for the agent execution to be respected.", ) memory: bool = Field( default=False, description="Whether the agent should have memory or not" ) verbose: bool = Field( default=False, description="Verbose mode for the Agent Execution" ) allow_delegation: bool = Field( default=True, description="Allow delegation of tasks to agents" ) tools: Optional[List[Any]] = Field( default_factory=list, description="Tools at agents disposal" ) max_iter: Optional[int] = Field( default=15, description="Maximum iterations for an agent to execute a task" ) agent_executor: InstanceOf[CrewAgentExecutor] = Field( default=None, description="An instance of the CrewAgentExecutor class." ) tools_handler: InstanceOf[ToolsHandler] = Field( default=None, description="An instance of the ToolsHandler class." ) cache_handler: InstanceOf[CacheHandler] = Field( default=CacheHandler(), description="An instance of the CacheHandler class." 
) step_callback: Optional[Any] = Field( default=None, description="Callback to be executed after each step of the agent execution.", ) i18n: I18N = Field(default=I18N(), description="Internationalization settings.") llm: Any = Field( default_factory=lambda: ChatOpenAI( model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4") ), description="Language model that will run the agent.", ) function_calling_llm: Optional[Any] = Field( description="Language model that will run the agent.", default=None ) callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field( default=None, description="Callback to be executed" ) def __init__(__pydantic_self__, **data): config = data.pop("config", {}) super().__init__(**config, **data) @field_validator("id", mode="before") @classmethod def _deny_user_set_id(cls, v: Optional[UUID4]) -> None: if v: raise PydanticCustomError( "may_not_set_field", "This field is not to be set by the user.", {} ) @model_validator(mode="after") def set_attributes_based_on_config(self) -> "Agent": """Set attributes based on the agent configuration.""" if self.config: for key, value in self.config.items(): setattr(self, key, value) return self @model_validator(mode="after") def set_private_attrs(self): """Set private attributes.""" self._logger = Logger(self.verbose) if self.max_rpm and not self._rpm_controller: self._rpm_controller = RPMController( max_rpm=self.max_rpm, logger=self._logger ) return self @model_validator(mode="after") def set_agent_executor(self) -> "Agent": """set agent executor is set.""" if hasattr(self.llm, "model_name"): self.llm.callbacks = [ TokenCalcHandler(self.llm.model_name, self._token_process) ] if not self.agent_executor: self.set_cache_handler(self.cache_handler) return self def execute_task( self, task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None, ) -> str: """Execute a task with the agent. Args: task: Task to execute. context: Context to execute the task in. tools: Tools to use for the task. Returns: Output of the agent """ self.tools_handler.last_used_tool = {} task_prompt = task.prompt() if context: task_prompt = self.i18n.slice("task_with_context").format( task=task_prompt, context=context ) tools = self._parse_tools(tools or self.tools) self.create_agent_executor(tools=tools) self.agent_executor.tools = tools self.agent_executor.task = task self.agent_executor.tools_description = render_text_description(tools) self.agent_executor.tools_names = self.__tools_names(tools) result = self.agent_executor.invoke( { "input": task_prompt, "tool_names": self.agent_executor.tools_names, "tools": self.agent_executor.tools_description, } )["output"] if self.max_rpm: self._rpm_controller.stop_rpm_counter() return result def set_cache_handler(self, cache_handler: CacheHandler) -> None: """Set the cache handler for the agent. Args: cache_handler: An instance of the CacheHandler class. """ self.cache_handler = cache_handler self.tools_handler = ToolsHandler(cache=self.cache_handler) self.create_agent_executor() def set_rpm_controller(self, rpm_controller: RPMController) -> None: """Set the rpm controller for the agent. Args: rpm_controller: An instance of the RPMController class. """ if not self._rpm_controller: self._rpm_controller = rpm_controller self.create_agent_executor() def create_agent_executor(self, tools=None) -> None: """Create an agent executor for the agent. Returns: An instance of the CrewAgentExecutor class. 
""" tools = tools or self.tools agent_args = { "input": lambda x: x["input"], "tools": lambda x: x["tools"], "tool_names": lambda x: x["tool_names"], "agent_scratchpad": lambda x: self.format_log_to_str( x["intermediate_steps"] ), } executor_args = { "llm": self.llm, "i18n": self.i18n, "tools": self._parse_tools(tools), "verbose": self.verbose, "handle_parsing_errors": True, "max_iterations": self.max_iter, "step_callback": self.step_callback, "tools_handler": self.tools_handler, "function_calling_llm": self.function_calling_llm, "callbacks": self.callbacks, } if self._rpm_controller: executor_args[ "request_within_rpm_limit" ] = self._rpm_controller.check_or_wait if self.memory: summary_memory = ConversationSummaryMemory( llm=self.llm, input_key="input", memory_key="chat_history" ) executor_args["memory"] = summary_memory agent_args["chat_history"] = lambda x: x["chat_history"] prompt = Prompts(i18n=self.i18n, tools=tools).task_execution_with_memory() else: prompt = Prompts(i18n=self.i18n, tools=tools).task_execution() execution_prompt = prompt.partial( goal=self.goal, role=self.role, backstory=self.backstory, ) bind = self.llm.bind(stop=[self.i18n.slice("observation")]) inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self) self.agent_executor = CrewAgentExecutor( agent=RunnableAgent(runnable=inner_agent), **executor_args ) def interpolate_inputs(self, inputs: Dict[str, Any]) -> None: """Interpolate inputs into the agent description and backstory.""" if inputs: self.role = self.role.format(**inputs) self.goal = self.goal.format(**inputs) self.backstory = self.backstory.format(**inputs) def increment_formatting_errors(self) -> None: """Count the formatting errors of the agent.""" self.formatting_errors += 1 def format_log_to_str( self, intermediate_steps: List[Tuple[AgentAction, str]], observation_prefix: str = "Observation: ", llm_prefix: str = "", ) -> str: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}" return thoughts def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]: """Parse tools to be used for the task.""" # tentatively try to import from crewai_tools import BaseTool as CrewAITool tools_list = [] try: from crewai_tools import BaseTool as CrewAITool for tool in tools: if isinstance(tool, CrewAITool): tools_list.append(tool.to_langchain()) else: tools_list.append(tool) except ModuleNotFoundError: for tool in tools: tools_list.append(tool) return tools_list @staticmethod def __tools_names(tools) -> str: return ", ".join([t.name for t in tools]) def __repr__(self): return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
[ "langchain.tools.render.render_text_description", "langchain.agents.agent.RunnableAgent", "langchain.memory.ConversationSummaryMemory" ]
[((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2454, 2468), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2506, 2531), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2517, 2531), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2567, 2581), 'crewai.utilities.token_counter_callback.TokenProcess', 'TokenProcess', ([], {}), '()\n', (2579, 2581), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((2633, 2673), 'pydantic.ConfigDict', 'ConfigDict', ([], {'arbitrary_types_allowed': '(True)'}), '(arbitrary_types_allowed=True)\n', (2643, 2673), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2690, 2807), 'pydantic.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'frozen': '(True)', 'description': '"""Unique identifier for the object, not set by user."""'}), "(default_factory=uuid.uuid4, frozen=True, description=\n 'Unique identifier for the object, not set by user.')\n", (2695, 2807), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2850, 2888), 'pydantic.Field', 'Field', ([], {'description': '"""Role of the agent"""'}), "(description='Role of the agent')\n", (2855, 2888), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2905, 2948), 'pydantic.Field', 'Field', ([], {'description': '"""Objective of the agent"""'}), "(description='Objective of the agent')\n", (2910, 2948), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2970, 3013), 'pydantic.Field', 'Field', ([], {'description': '"""Backstory of the agent"""'}), "(description='Backstory of the agent')\n", (2975, 3013), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3053, 3115), 'pydantic.Field', 'Field', ([], {'description': '"""Configuration for the agent"""', 'default': 'None'}), "(description='Configuration for the agent', default=None)\n", (3058, 3115), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3168, 3291), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Maximum number of requests per minute for the agent execution to be respected."""'}), "(default=None, description=\n 'Maximum number of requests per minute for the agent execution to be respected.'\n )\n", (3173, 3291), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3324, 3403), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the agent should have memory or not"""'}), "(default=False, description='Whether the agent should have memory or not')\n", (3329, 3403), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, 
PrivateAttr, field_validator, model_validator\n'), ((3438, 3510), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Verbose mode for the Agent Execution"""'}), "(default=False, description='Verbose mode for the Agent Execution')\n", (3443, 3510), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3554, 3624), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Allow delegation of tasks to agents"""'}), "(default=True, description='Allow delegation of tasks to agents')\n", (3559, 3624), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3672, 3739), 'pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Tools at agents disposal"""'}), "(default_factory=list, description='Tools at agents disposal')\n", (3677, 3739), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3784, 3871), 'pydantic.Field', 'Field', ([], {'default': '(15)', 'description': '"""Maximum iterations for an agent to execute a task"""'}), "(default=15, description=\n 'Maximum iterations for an agent to execute a task')\n", (3789, 3871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3933, 4011), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the CrewAgentExecutor class."""'}), "(default=None, description='An instance of the CrewAgentExecutor class.')\n", (3938, 4011), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4072, 4145), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the ToolsHandler class."""'}), "(default=None, description='An instance of the ToolsHandler class.')\n", (4077, 4145), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4339, 4442), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed after each step of the agent execution."""'}), "(default=None, description=\n 'Callback to be executed after each step of the agent execution.')\n", (4344, 4442), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4797, 4871), 'pydantic.Field', 'Field', ([], {'description': '"""Language model that will run the agent."""', 'default': 'None'}), "(description='Language model that will run the agent.', default=None)\n", (4802, 4871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4951, 5009), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed"""'}), "(default=None, description='Callback to be executed')\n", (4956, 5009), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5159, 5195), 'pydantic.field_validator', 'field_validator', (['"""id"""'], {'mode': '"""before"""'}), "('id', mode='before')\n", (5174, 5195), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5430, 5459), 'pydantic.model_validator', 
'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5445, 5459), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5723, 5752), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5738, 5752), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((6070, 6099), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (6085, 6099), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5847, 5867), 'crewai.utilities.Logger', 'Logger', (['self.verbose'], {}), '(self.verbose)\n', (5853, 5867), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((7345, 7375), 'langchain.tools.render.render_text_description', 'render_text_description', (['tools'], {}), '(tools)\n', (7368, 7375), False, 'from langchain.tools.render import render_text_description\n'), ((8088, 8126), 'crewai.agents.ToolsHandler', 'ToolsHandler', ([], {'cache': 'self.cache_handler'}), '(cache=self.cache_handler)\n', (8100, 8126), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4229, 4243), 'crewai.agents.CacheHandler', 'CacheHandler', ([], {}), '()\n', (4241, 4243), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4492, 4498), 'crewai.utilities.I18N', 'I18N', ([], {}), '()\n', (4496, 4498), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((5305, 5397), 'pydantic_core.PydanticCustomError', 'PydanticCustomError', (['"""may_not_set_field"""', '"""This field is not to be set by the user."""', '{}'], {}), "('may_not_set_field',\n 'This field is not to be set by the user.', {})\n", (5324, 5397), False, 'from pydantic_core import PydanticCustomError\n'), ((5957, 6013), 'crewai.utilities.RPMController', 'RPMController', ([], {'max_rpm': 'self.max_rpm', 'logger': 'self._logger'}), '(max_rpm=self.max_rpm, logger=self._logger)\n', (5970, 6013), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((9715, 9805), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'self.llm', 'input_key': '"""input"""', 'memory_key': '"""chat_history"""'}), "(llm=self.llm, input_key='input', memory_key=\n 'chat_history')\n", (9740, 9805), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((10407, 10434), 'crewai.agents.CrewAgentParser', 'CrewAgentParser', ([], {'agent': 'self'}), '(agent=self)\n', (10422, 10434), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((6281, 6339), 'crewai.utilities.token_counter_callback.TokenCalcHandler', 'TokenCalcHandler', (['self.llm.model_name', 'self._token_process'], {}), '(self.llm.model_name, self._token_process)\n', (6297, 6339), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((10502, 10537), 'langchain.agents.agent.RunnableAgent', 'RunnableAgent', ([], {'runnable': 'inner_agent'}), '(runnable=inner_agent)\n', (10515, 10537), False, 'from langchain.agents.agent import RunnableAgent\n'), ((9974, 10010), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (9981, 10010), False, 
'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((10075, 10111), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (10082, 10111), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((4630, 4674), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MODEL_NAME"""', '"""gpt-4"""'], {}), "('OPENAI_MODEL_NAME', 'gpt-4')\n", (4644, 4674), False, 'import os\n')]
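An illustrative aside for the agent row above: a small sketch of the two langchain helpers it leans on, render_text_description (which produces the tools_description string injected into the prompt) and ConversationSummaryMemory (used when memory=True). Import paths follow the legacy langchain layout used in the row; the add tool and model settings are invented, and an OPENAI_API_KEY is assumed for the memory call.

from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryMemory
from langchain.tools import tool
from langchain.tools.render import render_text_description

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

# The agent above passes this rendered "name: description" text to its executor
# as tools_description.
print(render_text_description([add]))

# ConversationSummaryMemory keeps a running LLM-written summary under chat_history,
# matching the memory_key wired up in create_agent_executor above.
memory = ConversationSummaryMemory(
    llm=ChatOpenAI(temperature=0), input_key="input", memory_key="chat_history"
)
memory.save_context({"input": "hi"}, {"output": "hello"})
print(memory.load_memory_variables({}))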
import os import logging import hashlib import PyPDF2 from tqdm import tqdm from modules.presets import * from modules.utils import * from modules.config import local_embedding def get_documents(file_src): from langchain.schema import Document from langchain.text_splitter import TokenTextSplitter text_splitter = TokenTextSplitter(chunk_size=500, chunk_overlap=30) documents = [] logging.debug("Loading documents...") logging.debug(f"file_src: {file_src}") for file in file_src: filepath = file.name filename = os.path.basename(filepath) file_type = os.path.splitext(filename)[1] logging.info(f"loading file: {filename}") texts = None try: if file_type == ".pdf": logging.debug("Loading PDF...") try: from modules.pdf_func import parse_pdf from modules.config import advance_docs two_column = advance_docs["pdf"].get("two_column", False) pdftext = parse_pdf(filepath, two_column).text except: pdftext = "" with open(filepath, "rb") as pdfFileObj: pdfReader = PyPDF2.PdfReader(pdfFileObj) for page in tqdm(pdfReader.pages): pdftext += page.extract_text() texts = [Document(page_content=pdftext, metadata={"source": filepath})] elif file_type == ".docx": logging.debug("Loading Word...") from langchain.document_loaders import UnstructuredWordDocumentLoader loader = UnstructuredWordDocumentLoader(filepath) texts = loader.load() elif file_type == ".pptx": logging.debug("Loading PowerPoint...") from langchain.document_loaders import UnstructuredPowerPointLoader loader = UnstructuredPowerPointLoader(filepath) texts = loader.load() elif file_type == ".epub": logging.debug("Loading EPUB...") from langchain.document_loaders import UnstructuredEPubLoader loader = UnstructuredEPubLoader(filepath) texts = loader.load() elif file_type == ".xlsx": logging.debug("Loading Excel...") text_list = excel_to_string(filepath) texts = [] for elem in text_list: texts.append(Document(page_content=elem, metadata={"source": filepath})) elif file_type in [".jpg", ".jpeg", ".png", ".heif", ".heic", ".webp", ".bmp", ".gif", ".tiff", ".tif"]: raise gr.Warning(i18n("不支持的文件: ") + filename + i18n(",请使用 .pdf, .docx, .pptx, .epub, .xlsx 等文档。")) else: logging.debug("Loading text file...") from langchain.document_loaders import TextLoader loader = TextLoader(filepath, "utf8") texts = loader.load() except Exception as e: import traceback logging.error(f"Error loading file: {filename}") traceback.print_exc() if texts is not None: texts = text_splitter.split_documents(texts) documents.extend(texts) logging.debug("Documents loaded.") return documents def construct_index( api_key, file_src, max_input_size=4096, num_outputs=5, max_chunk_overlap=20, chunk_size_limit=600, embedding_limit=None, separator=" ", load_from_cache_if_possible=True, ): from langchain.chat_models import ChatOpenAI from langchain.vectorstores import FAISS if api_key: os.environ["OPENAI_API_KEY"] = api_key else: # 由于一个依赖的愚蠢的设计,这里必须要有一个API KEY os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx" logging.debug(f"api base: {os.environ.get('OPENAI_API_BASE', None)}") chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit embedding_limit = None if embedding_limit == 0 else embedding_limit separator = " " if separator == "" else separator index_name = get_file_hash(file_src) index_path = f"./index/{index_name}" if local_embedding: from langchain.embeddings.huggingface import HuggingFaceEmbeddings embeddings = HuggingFaceEmbeddings( model_name="sentence-transformers/distiluse-base-multilingual-cased-v2") else: from langchain.embeddings import OpenAIEmbeddings if 
os.environ.get("OPENAI_API_TYPE", "openai") == "openai": embeddings = OpenAIEmbeddings(openai_api_base=os.environ.get( "OPENAI_API_BASE", None), openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key)) else: embeddings = OpenAIEmbeddings(deployment=os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"], openai_api_key=os.environ["AZURE_OPENAI_API_KEY"], model=os.environ["AZURE_EMBEDDING_MODEL_NAME"], openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"], openai_api_type="azure") if os.path.exists(index_path) and load_from_cache_if_possible: logging.info(i18n("找到了缓存的索引文件,加载中……")) return FAISS.load_local(index_path, embeddings) else: documents = get_documents(file_src) logging.debug(i18n("构建索引中……")) if documents: with retrieve_proxy(): index = FAISS.from_documents(documents, embeddings) else: raise Exception(i18n("没有找到任何支持的文档。")) logging.debug(i18n("索引构建完成!")) os.makedirs("./index", exist_ok=True) index.save_local(index_path) logging.debug(i18n("索引已保存至本地!")) return index
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings", "langchain.document_loaders.UnstructuredWordDocumentLoader", "langchain.vectorstores.FAISS.from_documents", "langchain.document_loaders.UnstructuredPowerPointLoader", "langchain.schema.Document", "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.FAISS.load_local", "langchain.document_loaders.TextLoader", "langchain.document_loaders.UnstructuredEPubLoader", "langchain.text_splitter.TokenTextSplitter" ]
[((330, 381), 'langchain.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(30)'}), '(chunk_size=500, chunk_overlap=30)\n', (347, 381), False, 'from langchain.text_splitter import TokenTextSplitter\n'), ((406, 443), 'logging.debug', 'logging.debug', (['"""Loading documents..."""'], {}), "('Loading documents...')\n", (419, 443), False, 'import logging\n'), ((448, 486), 'logging.debug', 'logging.debug', (['f"""file_src: {file_src}"""'], {}), "(f'file_src: {file_src}')\n", (461, 486), False, 'import logging\n'), ((3415, 3449), 'logging.debug', 'logging.debug', (['"""Documents loaded."""'], {}), "('Documents loaded.')\n", (3428, 3449), False, 'import logging\n'), ((561, 587), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (577, 587), False, 'import os\n'), ((646, 687), 'logging.info', 'logging.info', (['f"""loading file: {filename}"""'], {}), "(f'loading file: {filename}')\n", (658, 687), False, 'import logging\n'), ((4440, 4539), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/distiluse-base-multilingual-cased-v2"""'}), "(model_name=\n 'sentence-transformers/distiluse-base-multilingual-cased-v2')\n", (4461, 4539), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((5212, 5238), 'os.path.exists', 'os.path.exists', (['index_path'], {}), '(index_path)\n', (5226, 5238), False, 'import os\n'), ((5334, 5374), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['index_path', 'embeddings'], {}), '(index_path, embeddings)\n', (5350, 5374), False, 'from langchain.vectorstores import FAISS\n'), ((5704, 5741), 'os.makedirs', 'os.makedirs', (['"""./index"""'], {'exist_ok': '(True)'}), "('./index', exist_ok=True)\n", (5715, 5741), False, 'import os\n'), ((608, 634), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (624, 634), False, 'import os\n'), ((4627, 4670), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_TYPE"""', '"""openai"""'], {}), "('OPENAI_API_TYPE', 'openai')\n", (4641, 4670), False, 'import os\n'), ((4907, 5176), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'deployment': "os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME']", 'openai_api_key': "os.environ['AZURE_OPENAI_API_KEY']", 'model': "os.environ['AZURE_EMBEDDING_MODEL_NAME']", 'openai_api_base': "os.environ['AZURE_OPENAI_API_BASE_URL']", 'openai_api_type': '"""azure"""'}), "(deployment=os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME'],\n openai_api_key=os.environ['AZURE_OPENAI_API_KEY'], model=os.environ[\n 'AZURE_EMBEDDING_MODEL_NAME'], openai_api_base=os.environ[\n 'AZURE_OPENAI_API_BASE_URL'], openai_api_type='azure')\n", (4923, 5176), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((774, 805), 'logging.debug', 'logging.debug', (['"""Loading PDF..."""'], {}), "('Loading PDF...')\n", (787, 805), False, 'import logging\n'), ((3204, 3252), 'logging.error', 'logging.error', (['f"""Error loading file: {filename}"""'], {}), "(f'Error loading file: {filename}')\n", (3217, 3252), False, 'import logging\n'), ((3265, 3286), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3284, 3286), False, 'import traceback\n'), ((3993, 4032), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', 'None'], {}), "('OPENAI_API_BASE', None)\n", (4007, 4032), False, 'import os\n'), ((5549, 5592), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', 
(['documents', 'embeddings'], {}), '(documents, embeddings)\n', (5569, 5592), False, 'from langchain.vectorstores import FAISS\n'), ((1418, 1479), 'langchain.schema.Document', 'Document', ([], {'page_content': 'pdftext', 'metadata': "{'source': filepath}"}), "(page_content=pdftext, metadata={'source': filepath})\n", (1426, 1479), False, 'from langchain.schema import Document\n'), ((1570, 1602), 'logging.debug', 'logging.debug', (['"""Loading Word..."""'], {}), "('Loading Word...')\n", (1583, 1602), False, 'import logging\n'), ((1714, 1754), 'langchain.document_loaders.UnstructuredWordDocumentLoader', 'UnstructuredWordDocumentLoader', (['filepath'], {}), '(filepath)\n', (1744, 1754), False, 'from langchain.document_loaders import UnstructuredWordDocumentLoader\n'), ((4742, 4781), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', 'None'], {}), "('OPENAI_API_BASE', None)\n", (4756, 4781), False, 'import os\n'), ((4815, 4866), 'os.environ.get', 'os.environ.get', (['"""OPENAI_EMBEDDING_API_KEY"""', 'api_key'], {}), "('OPENAI_EMBEDDING_API_KEY', api_key)\n", (4829, 4866), False, 'import os\n'), ((1055, 1086), 'modules.pdf_func.parse_pdf', 'parse_pdf', (['filepath', 'two_column'], {}), '(filepath, two_column)\n', (1064, 1086), False, 'from modules.pdf_func import parse_pdf\n'), ((1848, 1886), 'logging.debug', 'logging.debug', (['"""Loading PowerPoint..."""'], {}), "('Loading PowerPoint...')\n", (1861, 1886), False, 'import logging\n'), ((1996, 2034), 'langchain.document_loaders.UnstructuredPowerPointLoader', 'UnstructuredPowerPointLoader', (['filepath'], {}), '(filepath)\n', (2024, 2034), False, 'from langchain.document_loaders import UnstructuredPowerPointLoader\n'), ((1246, 1274), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['pdfFileObj'], {}), '(pdfFileObj)\n', (1262, 1274), False, 'import PyPDF2\n'), ((1311, 1332), 'tqdm.tqdm', 'tqdm', (['pdfReader.pages'], {}), '(pdfReader.pages)\n', (1315, 1332), False, 'from tqdm import tqdm\n'), ((2128, 2160), 'logging.debug', 'logging.debug', (['"""Loading EPUB..."""'], {}), "('Loading EPUB...')\n", (2141, 2160), False, 'import logging\n'), ((2264, 2296), 'langchain.document_loaders.UnstructuredEPubLoader', 'UnstructuredEPubLoader', (['filepath'], {}), '(filepath)\n', (2286, 2296), False, 'from langchain.document_loaders import UnstructuredEPubLoader\n'), ((2390, 2423), 'logging.debug', 'logging.debug', (['"""Loading Excel..."""'], {}), "('Loading Excel...')\n", (2403, 2423), False, 'import logging\n'), ((2936, 2973), 'logging.debug', 'logging.debug', (['"""Loading text file..."""'], {}), "('Loading text file...')\n", (2949, 2973), False, 'import logging\n'), ((3065, 3093), 'langchain.document_loaders.TextLoader', 'TextLoader', (['filepath', '"""utf8"""'], {}), "(filepath, 'utf8')\n", (3075, 3093), False, 'from langchain.document_loaders import TextLoader\n'), ((2577, 2635), 'langchain.schema.Document', 'Document', ([], {'page_content': 'elem', 'metadata': "{'source': filepath}"}), "(page_content=elem, metadata={'source': filepath})\n", (2585, 2635), False, 'from langchain.schema import Document\n')]
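A compact sketch of the splitter → embeddings → FAISS round trip that construct_index above performs, stripped of the UI and proxy plumbing. The document text and index path are placeholders; an OPENAI_API_KEY (plus the tiktoken and faiss packages) is assumed, and imports follow the same legacy langchain paths as the row.

import os
from langchain.schema import Document
from langchain.text_splitter import TokenTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

os.environ.setdefault("OPENAI_API_KEY", "sk-...")  # placeholder, replace with a real key

docs = [Document(page_content="LangChain helps build retrieval pipelines.",
                 metadata={"source": "example.txt"})]
chunks = TokenTextSplitter(chunk_size=500, chunk_overlap=30).split_documents(docs)

# Build, persist, and reload the index, mirroring FAISS.from_documents /
# save_local / FAISS.load_local in the row above.
index = FAISS.from_documents(chunks, OpenAIEmbeddings())
index.save_local("./index/example")

reloaded = FAISS.load_local("./index/example", OpenAIEmbeddings())
print(reloaded.similarity_search("What is LangChain for?", k=1))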
import re from typing import Union from langchain.agents.mrkl.output_parser import MRKLOutputParser from langchain.schema import AgentAction, AgentFinish, OutputParserException FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task. Question: the input question you must answer Thought: you should always think about what to do Action: Exactly only one word out of: {tool_names} Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question""" FORMAT_INSTRUCTIONS = """List of tools, use exactly one word when choosing Action: {tool_names} Only user asks a question, not you. For example user might ask: What is the latest news? Here is an example sequence you can follow: Thought: I should search online for the latest news. Action: Search Action Input: What is the latest news? Observation: X is going away. Z is again happening. Thought: That is interesting, I should search for more information about X and Z and also search about Q. Action: Search Action Input: How is X impacting things. Why is Z happening again, and what are the consequences? Observation: X is causing Y. Z may be caused by P and will lead to H. Thought: I now know the final answer Final Answer: The latest news is: * X is going away, and this is caused by Y. * Z is happening again, and the cause is P and will lead to H. Overall, X and Z are important problems. """ FORMAT_INSTRUCTIONS_PYTHON = """List of tools, use exactly one word when choosing Action: {tool_names} Only user asks a question, not you. For example user might ask: How many rows are in the dataset? Here is an example sequence you can follow. You can repeat Thoughts, but as soon as possible you should try to answer the original user question. Once you an answer the user question, just say: Thought: I now know the final answer Thought: I should use python_repl_ast tool. Action: python_repl_ast Action Input: df.shape Observation: (25, 10) Thought: I now know the final answer Final Answer: There are 25 rows in the dataset. 
""" FINAL_ANSWER_ACTION = "Final Answer:" MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = ( "Invalid Format: Missing 'Action:' after 'Thought:" ) MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = ( "Invalid Format: Missing 'Action Input:' after 'Action:'" ) FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = ( "Parsing LLM output produced both a final answer and a parse-able action:" ) class H2OMRKLOutputParser(MRKLOutputParser): """MRKL Output parser for the chat agent.""" def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS def parse(self, text: str) -> Union[AgentAction, AgentFinish]: includes_answer = FINAL_ANSWER_ACTION in text regex = ( r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" ) action_match = re.search(regex, text, re.DOTALL) if includes_answer: return AgentFinish( {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text ) elif action_match: action = action_match.group(1).strip() action_input = action_match.group(2) tool_input = action_input.strip(" ") # ensure if its a well formed SQL query we don't remove any trailing " chars if tool_input.startswith("SELECT ") is False: tool_input = tool_input.strip('"') return AgentAction(action, tool_input, text) if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL): raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text, send_to_llm=True, ) elif not re.search( r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL ): raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text, send_to_llm=True, ) else: raise OutputParserException(f"Could not parse LLM output: `{text}`") @property def _type(self) -> str: return "mrkl" class H2OPythonMRKLOutputParser(H2OMRKLOutputParser): def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS_PYTHON
[ "langchain.schema.AgentAction", "langchain.schema.OutputParserException" ]
[((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DOTALL)\n", (3698, 3749), False, 'import re\n'), ((3766, 3928), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text,\n send_to_llm=True)\n", (3787, 3928), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((3635, 3672), 'langchain.schema.AgentAction', 'AgentAction', (['action', 'tool_input', 'text'], {}), '(action, tool_input, text)\n', (3646, 3672), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4016, 4103), 're.search', 're.search', (['"""[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)"""', 'text', 're.DOTALL'], {}), "('[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)', text, re.\n DOTALL)\n", (4025, 4103), False, 'import re\n'), ((4133, 4300), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text,\n send_to_llm=True)\n", (4154, 4300), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4403, 4465), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {}), "(f'Could not parse LLM output: `{text}`')\n", (4424, 4465), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')]
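Two invented completions run through the parser row above, showing the AgentAction / AgentFinish split that langchain.schema models; H2OMRKLOutputParser is assumed to be importable from the module above.

# The class comes from the row above; the completion strings are made up.
parser = H2OMRKLOutputParser()

step = parser.parse(
    "Thought: I should search online for the latest news.\n"
    "Action: Search\n"
    "Action Input: What is the latest news?"
)
print(step.tool, step.tool_input)  # AgentAction -> "Search", "What is the latest news?"

final = parser.parse(
    "Thought: I now know the final answer\n"
    "Final Answer: X is going away."
)
print(final.return_values["output"])  # AgentFinish -> "X is going away."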
from typing import Any, Callable, Dict, TypeVar from langchain import BasePromptTemplate, LLMChain from langchain.chat_models.base import BaseChatModel from langchain.schema import BaseOutputParser, OutputParserException from openai.error import ( AuthenticationError, InvalidRequestError, RateLimitError, ServiceUnavailableError, ) from reworkd_platform.schemas.agent import ModelSettings from reworkd_platform.web.api.errors import OpenAIError T = TypeVar("T") def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T: try: return parser.parse(completion) except OutputParserException as e: raise OpenAIError( e, "There was an issue parsing the response from the AI model." ) async def openai_error_handler( func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any ) -> Any: try: return await func(*args, **kwargs) except ServiceUnavailableError as e: raise OpenAIError( e, "OpenAI is experiencing issues. Visit " "https://status.openai.com/ for more info.", should_log=not settings.custom_api_key, ) except InvalidRequestError as e: if e.user_message.startswith("The model:"): raise OpenAIError( e, f"Your API key does not have access to your current model. Please use a different model.", should_log=not settings.custom_api_key, ) raise OpenAIError(e, e.user_message) except AuthenticationError as e: raise OpenAIError( e, "Authentication error: Ensure a valid API key is being used.", should_log=not settings.custom_api_key, ) except RateLimitError as e: if e.user_message.startswith("You exceeded your current quota"): raise OpenAIError( e, f"Your API key exceeded your current quota, please check your plan and billing details.", should_log=not settings.custom_api_key, ) raise OpenAIError(e, e.user_message) except Exception as e: raise OpenAIError( e, "There was an unexpected issue getting a response from the AI model." ) async def call_model_with_handling( model: BaseChatModel, prompt: BasePromptTemplate, args: Dict[str, str], settings: ModelSettings, **kwargs: Any, ) -> str: chain = LLMChain(llm=model, prompt=prompt) return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
[ "langchain.LLMChain" ]
[((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n 'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n 'Authentication error: Ensure a valid API key is being used.',\n should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n 'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n f'Your API key does not have access to your current model. Please use a different model.'\n , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n f'Your API key exceeded your current quota, please check your plan and billing details.'\n , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')]
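A minimal synchronous counterpart to call_model_with_handling above, showing the bare LLMChain construction it wraps; the prompt text and goal are invented and an OPENAI_API_KEY is assumed.

from langchain import LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI

prompt = PromptTemplate(
    input_variables=["goal"],
    template="Suggest one concrete task that moves this goal forward: {goal}",
)
# The row above wraps the async chain.arun call in openai_error_handler;
# this sketch just runs the chain directly.
chain = LLMChain(llm=ChatOpenAI(temperature=0.9), prompt=prompt)
print(chain.run(goal="Launch the beta next month"))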
import json import os.path import logging import time from langchain.vectorstores import FAISS from langchain import PromptTemplate from utils.references import References from utils.knowledge import Knowledge from utils.file_operations import make_archive, copy_templates from utils.tex_processing import create_copies from utils.gpt_interaction import GPTModel from utils.prompts import SYSTEM from utils.embeddings import EMBEDDINGS from utils.gpt_interaction import get_gpt_responses TOTAL_TOKENS = 0 TOTAL_PROMPTS_TOKENS = 0 TOTAL_COMPLETION_TOKENS = 0 def log_usage(usage, generating_target, print_out=True): global TOTAL_TOKENS global TOTAL_PROMPTS_TOKENS global TOTAL_COMPLETION_TOKENS prompts_tokens = usage['prompt_tokens'] completion_tokens = usage['completion_tokens'] total_tokens = usage['total_tokens'] TOTAL_TOKENS += total_tokens TOTAL_PROMPTS_TOKENS += prompts_tokens TOTAL_COMPLETION_TOKENS += completion_tokens message = f">>USAGE>> For generating {generating_target}, {total_tokens} tokens have been used " \ f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \ f"{TOTAL_TOKENS} tokens have been used in total." if print_out: print(message) logging.info(message) def _generation_setup(title, template="Default", tldr=False, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048, # generating references knowledge_database=None, max_tokens_kd=2048, query_counts=10): llm = GPTModel(model="gpt-3.5-turbo-16k") bibtex_path, destination_folder = copy_templates(template, title) logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log")) #generate key words keywords, usage = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True) log_usage(usage, "keywords") keywords = {keyword: max_kw_refs for keyword in keywords} print("Keywords: \n", keywords) #generate references ref = References(title, bib_refs) ref.collect_papers(keywords, tldr=tldr) references = ref.to_prompts(max_tokens=max_tokens_ref) all_paper_ids = ref.to_bibtex(bibtex_path) #product domain knowledge prompts = f"Title: {title}" preliminaries_kw, _ = llm(systems=SYSTEM["preliminaries"], prompts=prompts) # check if the database exists or not db_path = f"utils/knowledge_databases/{knowledge_database}" db_config_path = os.path.join(db_path, "db_meta.json") db_index_path = os.path.join(db_path, "faiss_index") if os.path.isdir(db_path): try: with open(db_config_path, "r", encoding="utf-8") as f: db_config = json.load(f) model_name = db_config["embedding_model"] embeddings = EMBEDDINGS[model_name] db = FAISS.load_local(db_index_path, embeddings) knowledge = Knowledge(db=db) knowledge.collect_knowledge(preliminaries_kw, max_query=query_counts) domain_knowledge = knowledge.to_prompts(max_tokens_kd) except Exception as e: domain_knowledge='' prompts = f"Title: {title}" syetem_promot = "You are an assistant designed to propose necessary components of an survey papers. Your response should follow the JSON format." components, usage = llm(systems=syetem_promot, prompts=prompts, return_json=True) log_usage(usage, "media") print(f"The paper information has been initialized. References are saved to {bibtex_path}.") paper = {} paper["title"] = title paper["references"] = references paper["bibtex"] = bibtex_path paper["components"] = components paper["domain_knowledge"] = domain_knowledge return paper, destination_folder, all_paper_ids def section_generation(paper, section, save_to_path, model, research_field="machine learning"): """ The main pipeline of generating a section. 1. 
Generate prompts. 2. Get responses from AI assistant. 3. Extract the section text. 4. Save the text to .tex file. :return usage """ title = paper["title"] references = paper["references"] components = paper['components'] instruction = '- Discuss three to five main related fields to this paper. For each field, select five to ten key publications from references. For each reference, analyze its strengths and weaknesses in one or two sentences. Present the related works in a logical manner, often chronologically. Consider using a taxonomy or categorization to structure the discussion. Do not use \section{...} or \subsection{...}; use \paragraph{...} to list related fields.' fundamental_subprompt = "Your task is to write the {section} section of the paper with the title '{title}'. This paper has the following content: {components}\n" instruction_subprompt = "\n" \ "Your response should follow the following instructions:\n" \ "{instruction}\n" ref_instruction_subprompt = "- Read references. " \ "Every time you use information from the references, you need to appropriately cite it (using \citep or \citet)." \ "For example of \citep, the sentence where you use information from lei2022adaptive \citep{{lei2022adaptive}}. " \ "For example of \citet, \citet{{lei2022adaptive}} claims some information.\n" \ "- Avoid citing the same reference in a same paragraph.\n" \ "\n" \ "References:\n" \ "{references}" output_subprompt = "Ensure that it can be directly compiled by LeTaX." reivew_prompts = PromptTemplate( input_variables=["title", "components", "instruction", "section", "references"], template=fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt + output_subprompt) prompts = reivew_prompts.format(title=title, components=components, instruction=instruction, section=section, references=references) SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"], template="You are an assistant designed to write academic papers in the field of {research_field} using LaTeX." ) output, usage = get_gpt_responses(SECTION_GENERATION_SYSTEM.format(research_field=research_field), prompts, model=model, temperature=0.4) output=output[25:] tex_file = os.path.join(save_to_path, f"{section}.tex") with open(tex_file, "w", encoding="utf-8") as f: f.write(output) use_md =True use_chinese = True if use_md: system_md = 'You are an translator between the LaTeX and .MD. here is a latex file where the content is: \n \n ' + output prompts_md = 'you should transfer the latex content to the .MD format seriously, and pay attention to the correctness of the citation format (use the number). you should directly output the new content without anyoter replay. you should add reference papers at the end of the paper, and add line breaks between two reference papers. The Title should be ' + paper['title'] output_md, usage_md = get_gpt_responses(system_md, prompts_md, model=model, temperature=0.4) md_file = os.path.join(save_to_path, f"{'survey'}.md") with open(md_file, "w", encoding="utf-8") as m: m.write(output_md) if use_chinese == True: system_md_chi = 'You are an translator between the english and chinese. here is a english file where the content is: \n \n ' + output prompts_md_chi = 'you should transfer the english to chinese and dont change anything others. you should directly output the new content without anyoter replay. you should keep the reference papers unchanged.' 
output_md_chi, usage_md_chi = get_gpt_responses(system_md_chi, prompts_md_chi, model=model, temperature=0.4) md_file_chi = os.path.join(save_to_path, f"{'survey_chinese'}.md") with open(md_file_chi, "w", encoding="utf-8") as c: c.write(output_md_chi) return usage def generate_draft(title, tldr=True, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048, knowledge_database=None, max_tokens_kd=2048, query_counts=10, section='related works', model="gpt-3.5-turbo-16k", template="Default" , save_zip=None): print("================START================") paper, destination_folder, _ = _generation_setup(title, template, tldr, max_kw_refs, bib_refs, max_tokens_ref=max_tokens_ref, max_tokens_kd=max_tokens_kd, query_counts=query_counts, knowledge_database=knowledge_database) # main components print(f"================PROCESSING================") usage = section_generation(paper, section, destination_folder, model=model) log_usage(usage, section) create_copies(destination_folder) print("\nPROCESSING COMPLETE\n") return make_archive(destination_folder, title+".zip") print("draft has been generated in " + destination_folder) if __name__ == "__main__": import openai openai.api_key = "your key" openai.api_base = 'https://api.openai.com/v1' #openai.proxy = "socks5h://localhost:7890 # if use the vpn target_title = "Reinforcement Learning for Robot Control" generate_draft(target_title, knowledge_database="ml_textbook_test",max_kw_refs=20)
[ "langchain.vectorstores.FAISS.load_local", "langchain.PromptTemplate" ]
[((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GPTModel\n'), ((1626, 1657), 'utils.file_operations.copy_templates', 'copy_templates', (['template', 'title'], {}), '(template, title)\n', (1640, 1657), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((2042, 2069), 'utils.references.References', 'References', (['title', 'bib_refs'], {}), '(title, bib_refs)\n', (2052, 2069), False, 'from utils.references import References\n'), ((5824, 6030), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['title', 'components', 'instruction', 'section', 'references']", 'template': '(fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt +\n output_subprompt)'}), "(input_variables=['title', 'components', 'instruction',\n 'section', 'references'], template=fundamental_subprompt +\n instruction_subprompt + ref_instruction_subprompt + output_subprompt)\n", (5838, 6030), False, 'from langchain import PromptTemplate\n'), ((6353, 6526), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['research_field']", 'template': '"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX."""'}), "(input_variables=['research_field'], template=\n 'You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.'\n )\n", (6367, 6526), False, 'from langchain import PromptTemplate\n'), ((9472, 9505), 'utils.tex_processing.create_copies', 'create_copies', (['destination_folder'], {}), '(destination_folder)\n', (9485, 9505), False, 'from utils.tex_processing import create_copies\n'), ((9554, 9602), 'utils.file_operations.make_archive', 'make_archive', (['destination_folder', "(title + '.zip')"], {}), "(destination_folder, title + '.zip')\n", (9566, 9602), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((7503, 7573), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md', 'prompts_md'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md, prompts_md, model=model, temperature=0.4)\n', (7520, 7573), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2856, 2899), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['db_index_path', 'embeddings'], {}), '(db_index_path, embeddings)\n', (2872, 2899), False, 'from langchain.vectorstores import FAISS\n'), ((2924, 2940), 'utils.knowledge.Knowledge', 'Knowledge', ([], {'db': 'db'}), '(db=db)\n', (2933, 2940), False, 'from utils.knowledge import Knowledge\n'), ((8211, 8289), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md_chi', 'prompts_md_chi'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md_chi, prompts_md_chi, model=model, temperature=0.4)\n', (8228, 8289), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2724, 2736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2733, 2736), False, 'import json\n')]
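A trimmed sketch of the PromptTemplate composition that section_generation above relies on, with a much shorter template; the title, section name, and reference text are placeholders.

from langchain import PromptTemplate

review_prompt = PromptTemplate(
    input_variables=["title", "section", "references"],
    template=(
        "Your task is to write the {section} section of the paper titled '{title}'.\n"
        "Cite with \\citep{{...}} where information from the references is used.\n"
        "References:\n{references}"
    ),
)
# format() fills the variables the same way the row's reivew_prompts.format(...) call does.
print(review_prompt.format(
    title="Reinforcement Learning for Robot Control",
    section="related works",
    references="[placeholder2021example] ...",
))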
import sys import os sys.path.append(os.path.dirname(os.path.realpath(__file__))) sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq')) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio')) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'audio_detection')) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mono2binaural')) import gradio as gr import matplotlib import librosa import torch from langchain.agents.initialize import initialize_agent from langchain.agents.tools import Tool from langchain.chains.conversation.memory import ConversationBufferMemory from langchain.llms.openai import OpenAI import re import uuid import soundfile from PIL import Image import numpy as np from omegaconf import OmegaConf from einops import repeat from ldm.util import instantiate_from_config from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000 from vocoder.bigvgan.models import VocoderBigVGAN from ldm.models.diffusion.ddim import DDIMSampler import whisper from utils.hparams import set_hparams from utils.hparams import hparams as hp import scipy.io.wavfile as wavfile import librosa from audio_infer.utils import config as detection_config from audio_infer.pytorch.models import PVT import clip import numpy as np AUDIO_CHATGPT_PREFIX = """AudioGPT AudioGPT can not directly read audios, but it has a list of tools to finish different speech, audio, and singing voice tasks. Each audio will have a file name formed as "audio/xxx.wav". When talking about audios, AudioGPT is very strict to the file name and will never fabricate nonexistent files. AudioGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the audio content and audio file name. It will remember to provide the file name from the last tool observation, if a new audio is generated. Human may provide new audios to AudioGPT with a description. The description helps AudioGPT to understand this audio, but AudioGPT should use tools to finish following tasks, rather than directly imagine from the description. Overall, AudioGPT is a powerful audio dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. TOOLS: ------ AudioGPT has access to the following tools:""" AUDIO_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format: ``` Thought: Do I need to use a tool? Yes Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ``` When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format: ``` Thought: Do I need to use a tool? No {ai_prefix}: [your response here] ``` """ AUDIO_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if not exists. You will remember to provide the audio file name loyally if it's provided in the last tool observation. Begin! Previous conversation history: {chat_history} New input: {input} Thought: Do I need to use a tool? 
{agent_scratchpad}""" def cut_dialogue_history(history_memory, keep_last_n_words = 500): tokens = history_memory.split() n_tokens = len(tokens) print(f"history_memory:{history_memory}, n_tokens: {n_tokens}") if n_tokens < keep_last_n_words: return history_memory else: paragraphs = history_memory.split('\n') last_n_tokens = n_tokens while last_n_tokens >= keep_last_n_words: last_n_tokens = last_n_tokens - len(paragraphs[0].split(' ')) paragraphs = paragraphs[1:] return '\n' + '\n'.join(paragraphs) def merge_audio(audio_path_1, audio_path_2): merged_signal = [] sr_1, signal_1 = wavfile.read(audio_path_1) sr_2, signal_2 = wavfile.read(audio_path_2) merged_signal.append(signal_1) merged_signal.append(signal_2) merged_signal = np.hstack(merged_signal) merged_signal = np.asarray(merged_signal, dtype=np.int16) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") wavfile.write(audio_filename, sr_2, merged_signal) return audio_filename class T2I: def __init__(self, device): from transformers import AutoModelForCausalLM, AutoTokenizer from diffusers import StableDiffusionPipeline from transformers import pipeline print("Initializing T2I to %s" % device) self.device = device self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion") self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion") self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device) self.pipe.to(device) def inference(self, text): image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"] print(f'{text} refined to {refined_text}') image = self.pipe(refined_text).images[0] image.save(image_filename) print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}") return image_filename class ImageCaptioning: def __init__(self, device): from transformers import BlipProcessor, BlipForConditionalGeneration print("Initializing ImageCaptioning to %s" % device) self.device = device self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device) def inference(self, image_path): inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device) out = self.model.generate(**inputs) captions = self.processor.decode(out[0], skip_special_tokens=True) return captions class T2A: def __init__(self, device): print("Initializing Make-An-Audio to %s" % device) self.device = device self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/text_to_audio/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta40multi_epoch=000085.ckpt', device=device) self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device) def _initialize_model(self, config, ckpt, device): config = OmegaConf.load(config) model = instantiate_from_config(config.model) model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False) model = model.to(device) model.cond_stage_model.to(model.device) model.cond_stage_model.device = model.device sampler = DDIMSampler(model) return sampler def txt2audio(self, text, seed = 55, scale = 1.5, ddim_steps = 100, 
n_samples = 3, W = 624, H = 80): SAMPLE_RATE = 16000 prng = np.random.RandomState(seed) start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8) start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32) uc = self.sampler.model.get_learned_conditioning(n_samples * [""]) c = self.sampler.model.get_learned_conditioning(n_samples * [text]) shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x) samples_ddim, _ = self.sampler.sample(S = ddim_steps, conditioning = c, batch_size = n_samples, shape = shape, verbose = False, unconditional_guidance_scale = scale, unconditional_conditioning = uc, x_T = start_code) x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1] wav_list = [] for idx,spec in enumerate(x_samples_ddim): wav = self.vocoder.vocode(spec) wav_list.append((SAMPLE_RATE,wav)) best_wav = self.select_best_audio(text, wav_list) return best_wav def select_best_audio(self, prompt, wav_list): from wav_evaluation.models.CLAPWrapper import CLAPWrapper clap_model = CLAPWrapper('text_to_audio/Make_An_Audio/useful_ckpts/CLAP/CLAP_weights_2022.pth', 'text_to_audio/Make_An_Audio/useful_ckpts/CLAP/config.yml', use_cuda=torch.cuda.is_available()) text_embeddings = clap_model.get_text_embeddings([prompt]) score_list = [] for data in wav_list: sr, wav = data audio_embeddings = clap_model.get_audio_embeddings([(torch.FloatTensor(wav), sr)], resample=True) score = clap_model.compute_similarity(audio_embeddings, text_embeddings, use_logit_scale=False).squeeze().cpu().numpy() score_list.append(score) max_index = np.array(score_list).argmax() print(score_list, max_index) return wav_list[max_index] def inference(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80): melbins,mel_len = 80,624 with torch.no_grad(): result = self.txt2audio( text = text, H = melbins, W = mel_len ) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, result[1], samplerate = 16000) print(f"Processed T2I.run, text: {text}, audio_filename: {audio_filename}") return audio_filename class I2A: def __init__(self, device): print("Initializing Make-An-Audio-Image to %s" % device) self.device = device self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/img_to_audio/img2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta54_epoch=000216.ckpt', device=device) self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device) def _initialize_model(self, config, ckpt, device): config = OmegaConf.load(config) model = instantiate_from_config(config.model) model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False) model = model.to(device) model.cond_stage_model.to(model.device) model.cond_stage_model.device = model.device sampler = DDIMSampler(model) return sampler def img2audio(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80): SAMPLE_RATE = 16000 n_samples = 1 # only support 1 sample prng = np.random.RandomState(seed) start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8) start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32) uc = self.sampler.model.get_learned_conditioning(n_samples * [""]) #image = Image.fromarray(image) image = Image.open(image) image = 
self.sampler.model.cond_stage_model.preprocess(image).unsqueeze(0) image_embedding = self.sampler.model.cond_stage_model.forward_img(image) c = image_embedding.repeat(n_samples, 1, 1) shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x) samples_ddim, _ = self.sampler.sample(S=ddim_steps, conditioning=c, batch_size=n_samples, shape=shape, verbose=False, unconditional_guidance_scale=scale, unconditional_conditioning=uc, x_T=start_code) x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1] wav_list = [] for idx,spec in enumerate(x_samples_ddim): wav = self.vocoder.vocode(spec) wav_list.append((SAMPLE_RATE,wav)) best_wav = wav_list[0] return best_wav def inference(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80): melbins,mel_len = 80,624 with torch.no_grad(): result = self.img2audio( image=image, H=melbins, W=mel_len ) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, result[1], samplerate = 16000) print(f"Processed I2a.run, image_filename: {image}, audio_filename: {audio_filename}") return audio_filename class TTS: def __init__(self, device=None): from inference.tts.PortaSpeech import TTSInference if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing PortaSpeech to %s" % device) self.device = device self.exp_name = 'checkpoints/ps_adv_baseline' self.set_model_hparams() self.inferencer = TTSInference(self.hp, device) def set_model_hparams(self): set_hparams(exp_name=self.exp_name, print_hparams=False) self.hp = hp def inference(self, text): self.set_model_hparams() inp = {"text": text} out = self.inferencer.infer_once(inp) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, out, samplerate=22050) return audio_filename class T2S: def __init__(self, device= None): from inference.svs.ds_e2e import DiffSingerE2EInfer if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing DiffSinger to %s" % device) self.device = device self.exp_name = 'checkpoints/0831_opencpop_ds1000' self.config= 'NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml' self.set_model_hparams() self.pipe = DiffSingerE2EInfer(self.hp, device) self.default_inp = { 'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP', 'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest', 'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590' } def set_model_hparams(self): set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False) self.hp = hp def inference(self, inputs): self.set_model_hparams() val = inputs.split(",") key = ['text', 'notes', 'notes_duration'] try: inp = {k: v for k, v in zip(key, val)} wav = self.pipe.infer_once(inp) except: print('Error occurs. 
Generate default audio sample.\n') inp = self.default_inp wav = self.pipe.infer_once(inp) #if inputs == '' or len(val) < len(key): # inp = self.default_inp #else: # inp = {k:v for k,v in zip(key,val)} #wav = self.pipe.infer_once(inp) wav *= 32767 audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16)) print(f"Processed T2S.run, audio_filename: {audio_filename}") return audio_filename class t2s_VISinger: def __init__(self, device=None): from espnet2.bin.svs_inference import SingingGenerate if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing VISingere to %s" % device) tag = 'AQuarterMile/opencpop_visinger1' self.model = SingingGenerate.from_pretrained( model_tag=str_or_none(tag), device=device, ) phn_dur = [[0. , 0.219 ], [0.219 , 0.50599998], [0.50599998, 0.71399999], [0.71399999, 1.097 ], [1.097 , 1.28799999], [1.28799999, 1.98300004], [1.98300004, 7.10500002], [7.10500002, 7.60400009]] phn = ['sh', 'i', 'q', 'v', 'n', 'i', 'SP', 'AP'] score = [[0, 0.50625, 'sh_i', 58, 'sh_i'], [0.50625, 1.09728, 'q_v', 56, 'q_v'], [1.09728, 1.9832100000000001, 'n_i', 53, 'n_i'], [1.9832100000000001, 7.105360000000001, 'SP', 0, 'SP'], [7.105360000000001, 7.604390000000001, 'AP', 0, 'AP']] tempo = 70 tmp = {} tmp["label"] = phn_dur, phn tmp["score"] = tempo, score self.default_inp = tmp def inference(self, inputs): val = inputs.split(",") key = ['text', 'notes', 'notes_duration'] try: # TODO: input will be update inp = {k: v for k, v in zip(key, val)} wav = self.model(text=inp)["wav"] except: print('Error occurs. Generate default audio sample.\n') inp = self.default_inp wav = self.model(text=inp)["wav"] audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, wav, samplerate=self.model.fs) return audio_filename class TTS_OOD: def __init__(self, device): from inference.tts.GenerSpeech import GenerSpeechInfer if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing GenerSpeech to %s" % device) self.device = device self.exp_name = 'checkpoints/GenerSpeech' self.config = 'NeuralSeq/modules/GenerSpeech/config/generspeech.yaml' self.set_model_hparams() self.pipe = GenerSpeechInfer(self.hp, device) def set_model_hparams(self): set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False) f0_stats_fn = f'{hp["binary_data_dir"]}/train_f0s_mean_std.npy' if os.path.exists(f0_stats_fn): hp['f0_mean'], hp['f0_std'] = np.load(f0_stats_fn) hp['f0_mean'] = float(hp['f0_mean']) hp['f0_std'] = float(hp['f0_std']) hp['emotion_encoder_path'] = 'checkpoints/Emotion_encoder.pt' self.hp = hp def inference(self, inputs): self.set_model_hparams() key = ['ref_audio', 'text'] val = inputs.split(",") inp = {k: v for k, v in zip(key, val)} wav = self.pipe.infer_once(inp) wav *= 32767 audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16)) print( f"Processed GenerSpeech.run. Input text:{val[1]}. Input reference audio: {val[0]}. 
Output Audio_filename: {audio_filename}") return audio_filename class Inpaint: def __init__(self, device): print("Initializing Make-An-Audio-inpaint to %s" % device) self.device = device self.sampler = self._initialize_model_inpaint('text_to_audio/Make_An_Audio/configs/inpaint/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/inpaint7_epoch00047.ckpt') self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device) self.cmap_transform = matplotlib.cm.viridis def _initialize_model_inpaint(self, config, ckpt): config = OmegaConf.load(config) model = instantiate_from_config(config.model) model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model = model.to(device) print(model.device, device, model.cond_stage_model.device) sampler = DDIMSampler(model) return sampler def make_batch_sd(self, mel, mask, num_samples=1): mel = torch.from_numpy(mel)[None,None,...].to(dtype=torch.float32) mask = torch.from_numpy(mask)[None,None,...].to(dtype=torch.float32) masked_mel = (1 - mask) * mel mel = mel * 2 - 1 mask = mask * 2 - 1 masked_mel = masked_mel * 2 -1 batch = { "mel": repeat(mel.to(device=self.device), "1 ... -> n ...", n=num_samples), "mask": repeat(mask.to(device=self.device), "1 ... -> n ...", n=num_samples), "masked_mel": repeat(masked_mel.to(device=self.device), "1 ... -> n ...", n=num_samples), } return batch def gen_mel(self, input_audio_path): SAMPLE_RATE = 16000 sr, ori_wav = wavfile.read(input_audio_path) print("gen_mel") print(sr,ori_wav.shape,ori_wav) ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0 if len(ori_wav.shape)==2:# stereo ori_wav = librosa.to_mono(ori_wav.T) print(sr,ori_wav.shape,ori_wav) ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE) mel_len,hop_size = 848,256 input_len = mel_len * hop_size if len(ori_wav) < input_len: input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0) else: input_wav = ori_wav[:input_len] mel = TRANSFORMS_16000(input_wav) return mel def gen_mel_audio(self, input_audio): SAMPLE_RATE = 16000 sr,ori_wav = input_audio print("gen_mel_audio") print(sr,ori_wav.shape,ori_wav) ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0 if len(ori_wav.shape)==2:# stereo ori_wav = librosa.to_mono(ori_wav.T) print(sr,ori_wav.shape,ori_wav) ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE) mel_len,hop_size = 848,256 input_len = mel_len * hop_size if len(ori_wav) < input_len: input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0) else: input_wav = ori_wav[:input_len] mel = TRANSFORMS_16000(input_wav) return mel def show_mel_fn(self, input_audio_path): crop_len = 500 crop_mel = self.gen_mel(input_audio_path)[:,:crop_len] color_mel = self.cmap_transform(crop_mel) image = Image.fromarray((color_mel*255).astype(np.uint8)) image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") image.save(image_filename) return image_filename def inpaint(self, batch, seed, ddim_steps, num_samples=1, W=512, H=512): model = self.sampler.model prng = np.random.RandomState(seed) start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8) start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32) c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"])) cc = torch.nn.functional.interpolate(batch["mask"], size=c.shape[-2:]) c = torch.cat((c, cc), dim=1) # 
(b,c+1,h,w) 1 is mask shape = (c.shape[1]-1,)+c.shape[2:] samples_ddim, _ = self.sampler.sample(S=ddim_steps, conditioning=c, batch_size=c.shape[0], shape=shape, verbose=False) x_samples_ddim = model.decode_first_stage(samples_ddim) mel = torch.clamp((batch["mel"]+1.0)/2.0,min=0.0, max=1.0) mask = torch.clamp((batch["mask"]+1.0)/2.0,min=0.0, max=1.0) predicted_mel = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0) inpainted = (1-mask)*mel+mask*predicted_mel inpainted = inpainted.cpu().numpy().squeeze() inapint_wav = self.vocoder.vocode(inpainted) return inpainted, inapint_wav def inference(self, input_audio, mel_and_mask, seed = 55, ddim_steps = 100): SAMPLE_RATE = 16000 torch.set_grad_enabled(False) mel_img = Image.open(mel_and_mask['image']) mask_img = Image.open(mel_and_mask["mask"]) show_mel = np.array(mel_img.convert("L"))/255 mask = np.array(mask_img.convert("L"))/255 mel_bins,mel_len = 80,848 input_mel = self.gen_mel_audio(input_audio)[:,:mel_len] mask = np.pad(mask,((0,0),(0,mel_len-mask.shape[1])),mode='constant',constant_values=0) print(mask.shape,input_mel.shape) with torch.no_grad(): batch = self.make_batch_sd(input_mel,mask,num_samples=1) inpainted,gen_wav = self.inpaint( batch=batch, seed=seed, ddim_steps=ddim_steps, num_samples=1, H=mel_bins, W=mel_len ) inpainted = inpainted[:,:show_mel.shape[1]] color_mel = self.cmap_transform(inpainted) input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0]) gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len] image = Image.fromarray((color_mel*255).astype(np.uint8)) image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") image.save(image_filename) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, gen_wav, samplerate = 16000) return image_filename, audio_filename class ASR: def __init__(self, device): print("Initializing Whisper to %s" % device) self.device = device self.model = whisper.load_model("base", device=device) def inference(self, audio_path): audio = whisper.load_audio(audio_path) audio = whisper.pad_or_trim(audio) mel = whisper.log_mel_spectrogram(audio).to(self.device) _, probs = self.model.detect_language(mel) options = whisper.DecodingOptions() result = whisper.decode(self.model, mel, options) return result.text def translate_english(self, audio_path): audio = self.model.transcribe(audio_path, language='English') return audio['text'] class A2T: def __init__(self, device): from audio_to_text.inference_waveform import AudioCapModel print("Initializing Audio-To-Text Model to %s" % device) self.device = device self.model = AudioCapModel("audio_to_text/audiocaps_cntrstv_cnn14rnn_trm") def inference(self, audio_path): audio = whisper.load_audio(audio_path) caption_text = self.model(audio) return caption_text[0] class GeneFace: def __init__(self, device=None): print("Initializing GeneFace model to %s" % device) from audio_to_face.GeneFace_binding import GeneFaceInfer if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' self.device = device self.geneface_model = GeneFaceInfer(device) print("Loaded GeneFace model") def inference(self, audio_path): audio_base_name = os.path.basename(audio_path)[:-4] out_video_name = audio_path.replace("audio","video").replace(".wav", ".mp4") inp = { 'audio_source_name': audio_path, 'out_npy_name': f'geneface/tmp/{audio_base_name}.npy', 'cond_name': f'geneface/tmp/{audio_base_name}.npy', 'out_video_name': out_video_name, 'tmp_imgs_dir': f'video/tmp_imgs', } 
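        # Single GeneFace pass: intermediate .npy features are cached under geneface/tmp,
        # temporary frames under video/tmp_imgs, and the rendered talking-head video path is
        # derived from the input by replacing audio/ with video/ and .wav with .mp4.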
self.geneface_model.infer_once(inp) return out_video_name class SoundDetection: def __init__(self, device): self.device = device self.sample_rate = 32000 self.window_size = 1024 self.hop_size = 320 self.mel_bins = 64 self.fmin = 50 self.fmax = 14000 self.model_type = 'PVT' self.checkpoint_path = 'audio_detection/audio_infer/useful_ckpts/audio_detection.pth' self.classes_num = detection_config.classes_num self.labels = detection_config.labels self.frames_per_second = self.sample_rate // self.hop_size # Model = eval(self.model_type) self.model = PVT(sample_rate=self.sample_rate, window_size=self.window_size, hop_size=self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax, classes_num=self.classes_num) checkpoint = torch.load(self.checkpoint_path, map_location=self.device) self.model.load_state_dict(checkpoint['model']) self.model.to(device) def inference(self, audio_path): # Forward (waveform, _) = librosa.core.load(audio_path, sr=self.sample_rate, mono=True) waveform = waveform[None, :] # (1, audio_length) waveform = torch.from_numpy(waveform) waveform = waveform.to(self.device) # Forward with torch.no_grad(): self.model.eval() batch_output_dict = self.model(waveform, None) framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0] """(time_steps, classes_num)""" # print('Sound event detection result (time_steps x classes_num): {}'.format( # framewise_output.shape)) import numpy as np import matplotlib.pyplot as plt sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1] top_k = 10 # Show top results top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]] """(time_steps, top_k)""" # Plot result stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=self.window_size, hop_length=self.hop_size, window='hann', center=True) frames_num = stft.shape[-1] fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4)) axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet') axs[0].set_ylabel('Frequency bins') axs[0].set_title('Log spectrogram') axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1) axs[1].xaxis.set_ticks(np.arange(0, frames_num, self.frames_per_second)) axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / self.frames_per_second)) axs[1].yaxis.set_ticks(np.arange(0, top_k)) axs[1].yaxis.set_ticklabels(np.array(self.labels)[sorted_indexes[0 : top_k]]) axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3) axs[1].set_xlabel('Seconds') axs[1].xaxis.set_ticks_position('bottom') plt.tight_layout() image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") plt.savefig(image_filename) return image_filename class SoundExtraction: def __init__(self, device): from sound_extraction.model.LASSNet import LASSNet from sound_extraction.utils.stft import STFT import torch.nn as nn self.device = device self.model_file = 'sound_extraction/useful_ckpts/LASSNet.pt' self.stft = STFT() self.model = nn.DataParallel(LASSNet(device)).to(device) checkpoint = torch.load(self.model_file) self.model.load_state_dict(checkpoint['model']) self.model.eval() def inference(self, inputs): #key = ['ref_audio', 'text'] from sound_extraction.utils.wav_io import load_wav, save_wav val = inputs.split(",") audio_path = val[0] # audio_path, text text = val[1] waveform = load_wav(audio_path) waveform = torch.tensor(waveform).transpose(1,0) mixed_mag, mixed_phase = self.stft.transform(waveform) text_query = ['[CLS] ' + text] mixed_mag = 
mixed_mag.transpose(2,1).unsqueeze(0).to(self.device) est_mask = self.model(mixed_mag, text_query) est_mag = est_mask * mixed_mag est_mag = est_mag.squeeze(1) est_mag = est_mag.permute(0, 2, 1) est_wav = self.stft.inverse(est_mag.cpu().detach(), mixed_phase) est_wav = est_wav.squeeze(0).squeeze(0).numpy() #est_path = f'output/est{i}.wav' audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") print('audio_filename ', audio_filename) save_wav(est_wav, audio_filename) return audio_filename class Binaural: def __init__(self, device): from src.models import BinauralNetwork self.device = device self.model_file = 'mono2binaural/useful_ckpts/m2b/binaural_network.net' self.position_file = ['mono2binaural/useful_ckpts/m2b/tx_positions.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions2.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions3.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions4.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions5.txt'] self.net = BinauralNetwork(view_dim=7, warpnet_layers=4, warpnet_channels=64, ) self.net.load_from_file(self.model_file) self.sr = 48000 def inference(self, audio_path): mono, sr = librosa.load(path=audio_path, sr=self.sr, mono=True) mono = torch.from_numpy(mono) mono = mono.unsqueeze(0) import numpy as np import random rand_int = random.randint(0,4) view = np.loadtxt(self.position_file[rand_int]).transpose().astype(np.float32) view = torch.from_numpy(view) if not view.shape[-1] * 400 == mono.shape[-1]: mono = mono[:,:(mono.shape[-1]//400)*400] # if view.shape[1]*400 > mono.shape[1]: m_a = view.shape[1] - mono.shape[-1]//400 rand_st = random.randint(0,m_a) view = view[:,m_a:m_a+(mono.shape[-1]//400)] # # binauralize and save output self.net.eval().to(self.device) mono, view = mono.to(self.device), view.to(self.device) chunk_size = 48000 # forward in chunks of 1s rec_field = 1000 # add 1000 samples as "safe bet" since warping has undefined rec. 
field rec_field -= rec_field % 400 # make sure rec_field is a multiple of 400 to match audio and view frequencies chunks = [ { "mono": mono[:, max(0, i-rec_field):i+chunk_size], "view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400] } for i in range(0, mono.shape[-1], chunk_size) ] for i, chunk in enumerate(chunks): with torch.no_grad(): mono = chunk["mono"].unsqueeze(0) view = chunk["view"].unsqueeze(0) binaural = self.net(mono, view).squeeze(0) if i > 0: binaural = binaural[:, -(mono.shape[-1]-rec_field):] chunk["binaural"] = binaural binaural = torch.cat([chunk["binaural"] for chunk in chunks], dim=-1) binaural = torch.clamp(binaural, min=-1, max=1).cpu() #binaural = chunked_forwarding(net, mono, view) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") import torchaudio torchaudio.save(audio_filename, binaural, sr) #soundfile.write(audio_filename, binaural, samplerate = 48000) print(f"Processed Binaural.run, audio_filename: {audio_filename}") return audio_filename class TargetSoundDetection: def __init__(self, device): from target_sound_detection.src import models as tsd_models from target_sound_detection.src.models import event_labels self.device = device self.MEL_ARGS = { 'n_mels': 64, 'n_fft': 2048, 'hop_length': int(22050 * 20 / 1000), 'win_length': int(22050 * 40 / 1000) } self.EPS = np.spacing(1) self.clip_model, _ = clip.load("ViT-B/32", device=self.device) self.event_labels = event_labels self.id_to_event = {i : label for i, label in enumerate(self.event_labels)} config = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth', map_location='cpu') config_parameters = dict(config) config_parameters['tao'] = 0.6 if 'thres' not in config_parameters.keys(): config_parameters['thres'] = 0.5 if 'time_resolution' not in config_parameters.keys(): config_parameters['time_resolution'] = 125 model_parameters = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt' , map_location=lambda storage, loc: storage) # load parameter self.model = getattr(tsd_models, config_parameters['model'])(config_parameters, inputdim=64, outputdim=2, time_resolution=config_parameters['time_resolution'], **config_parameters['model_args']) self.model.load_state_dict(model_parameters) self.model = self.model.to(self.device).eval() self.re_embeds = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth') self.ref_mel = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth') def extract_feature(self, fname): import soundfile as sf y, sr = sf.read(fname, dtype='float32') print('y ', y.shape) ti = y.shape[0]/sr if y.ndim > 1: y = y.mean(1) y = librosa.resample(y, sr, 22050) lms_feature = np.log(librosa.feature.melspectrogram(y, **self.MEL_ARGS) + self.EPS).T return lms_feature,ti def build_clip(self, text): text = clip.tokenize(text).to(self.device) # ["a diagram with dog", "a dog", "a cat"] text_features = self.clip_model.encode_text(text) return text_features def cal_similarity(self, target, retrievals): ans = [] #target =torch.from_numpy(target) for name in retrievals.keys(): tmp = retrievals[name] #tmp = torch.from_numpy(tmp) s = torch.cosine_similarity(target.squeeze(), tmp.squeeze(), dim=0) ans.append(s.item()) return ans.index(max(ans)) def inference(self, text, audio_path): from target_sound_detection.src.utils import median_filter, decode_with_timestamps target_emb = self.build_clip(text) # torch type idx = self.cal_similarity(target_emb, self.re_embeds) 
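        # The text query was embedded with CLIP above; pick the stored reference event whose
        # text embedding is most similar, then detect time segments of that event in the audio below.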
target_event = self.id_to_event[idx] embedding = self.ref_mel[target_event] embedding = torch.from_numpy(embedding) embedding = embedding.unsqueeze(0).to(self.device).float() #print('embedding ', embedding.shape) inputs,ti = self.extract_feature(audio_path) #print('ti ', ti) inputs = torch.from_numpy(inputs) inputs = inputs.unsqueeze(0).to(self.device).float() #print('inputs ', inputs.shape) decision, decision_up, logit = self.model(inputs, embedding) pred = decision_up.detach().cpu().numpy() pred = pred[:,:,0] frame_num = decision_up.shape[1] time_ratio = ti / frame_num filtered_pred = median_filter(pred, window_size=1, threshold=0.5) #print('filtered_pred ', filtered_pred) time_predictions = [] for index_k in range(filtered_pred.shape[0]): decoded_pred = [] decoded_pred_ = decode_with_timestamps(target_event, filtered_pred[index_k,:]) if len(decoded_pred_) == 0: # neg deal decoded_pred_.append((target_event, 0, 0)) decoded_pred.append(decoded_pred_) for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1 cur_pred = pred[num_batch] # Save each frame output, for later visualization label_prediction = decoded_pred[num_batch] # frame predict # print(label_prediction) for event_label, onset, offset in label_prediction: time_predictions.append({ 'onset': onset*time_ratio, 'offset': offset*time_ratio,}) ans = '' for i,item in enumerate(time_predictions): ans = ans + 'segment' + str(i+1) + ' start_time: ' + str(item['onset']) + ' end_time: ' + str(item['offset']) + '\t' #print(ans) return ans # class Speech_Enh_SS_SC: # """Speech Enhancement or Separation in single-channel # Example usage: # enh_model = Speech_Enh_SS("cuda") # enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav") # """ # def __init__(self, device="cuda", model_name="lichenda/chime4_fasnet_dprnn_tac"): # self.model_name = model_name # self.device = device # print("Initializing ESPnet Enh to %s" % device) # self._initialize_model() # def _initialize_model(self): # from espnet_model_zoo.downloader import ModelDownloader # from espnet2.bin.enh_inference import SeparateSpeech # d = ModelDownloader() # cfg = d.download_and_unpack(self.model_name) # self.separate_speech = SeparateSpeech( # train_config=cfg["train_config"], # model_file=cfg["model_file"], # # for segment-wise process on long speech # segment_size=2.4, # hop_size=0.8, # normalize_segment_scale=False, # show_progressbar=True, # ref_channel=None, # normalize_output_wav=True, # device=self.device, # ) # def inference(self, speech_path, ref_channel=0): # speech, sr = soundfile.read(speech_path) # speech = speech[:, ref_channel] # assert speech.dim() == 1 # enh_speech = self.separate_speech(speech[None, ], fs=sr) # if len(enh_speech) == 1: # return enh_speech[0] # return enh_speech # class Speech_Enh_SS_MC: # """Speech Enhancement or Separation in multi-channel""" # def __init__(self, device="cuda", model_name=None, ref_channel=4): # self.model_name = model_name # self.ref_channel = ref_channel # self.device = device # print("Initializing ESPnet Enh to %s" % device) # self._initialize_model() # def _initialize_model(self): # from espnet_model_zoo.downloader import ModelDownloader # from espnet2.bin.enh_inference import SeparateSpeech # d = ModelDownloader() # cfg = d.download_and_unpack(self.model_name) # self.separate_speech = SeparateSpeech( # train_config=cfg["train_config"], # model_file=cfg["model_file"], # # for segment-wise process on long speech # segment_size=2.4, # hop_size=0.8, # 
normalize_segment_scale=False, # show_progressbar=True, # ref_channel=self.ref_channel, # normalize_output_wav=True, # device=self.device, # ) # def inference(self, speech_path): # speech, sr = soundfile.read(speech_path) # speech = speech.T # enh_speech = self.separate_speech(speech[None, ...], fs=sr) # if len(enh_speech) == 1: # return enh_speech[0] # return enh_speech class Speech_Enh_SS_SC: """Speech Enhancement or Separation in single-channel Example usage: enh_model = Speech_Enh_SS("cuda") enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav") """ def __init__(self, device="cuda", model_name="espnet/Wangyou_Zhang_chime4_enh_train_enh_conv_tasnet_raw"): self.model_name = model_name self.device = device print("Initializing ESPnet Enh to %s" % device) self._initialize_model() def _initialize_model(self): from espnet_model_zoo.downloader import ModelDownloader from espnet2.bin.enh_inference import SeparateSpeech d = ModelDownloader() cfg = d.download_and_unpack(self.model_name) self.separate_speech = SeparateSpeech( train_config=cfg["train_config"], model_file=cfg["model_file"], # for segment-wise process on long speech segment_size=2.4, hop_size=0.8, normalize_segment_scale=False, show_progressbar=True, ref_channel=None, normalize_output_wav=True, device=self.device, ) def inference(self, speech_path, ref_channel=0): speech, sr = soundfile.read(speech_path) speech = speech[:, ref_channel] # speech = torch.from_numpy(speech) # assert speech.dim() == 1 enh_speech = self.separate_speech(speech[None, ...], fs=sr) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") # if len(enh_speech) == 1: soundfile.write(audio_filename, enh_speech[0].squeeze(), samplerate=sr) # return enh_speech[0] # return enh_speech # else: # print("############") # audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") # soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr) # audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") # soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr) # audio_filename = merge_audio(audio_filename_1, audio_filename_2) return audio_filename class Speech_SS: def __init__(self, device="cuda", model_name="lichenda/wsj0_2mix_skim_noncausal"): self.model_name = model_name self.device = device print("Initializing ESPnet SS to %s" % device) self._initialize_model() def _initialize_model(self): from espnet_model_zoo.downloader import ModelDownloader from espnet2.bin.enh_inference import SeparateSpeech d = ModelDownloader() cfg = d.download_and_unpack(self.model_name) self.separate_speech = SeparateSpeech( train_config=cfg["train_config"], model_file=cfg["model_file"], # for segment-wise process on long speech segment_size=2.4, hop_size=0.8, normalize_segment_scale=False, show_progressbar=True, ref_channel=None, normalize_output_wav=True, device=self.device, ) def inference(self, speech_path): speech, sr = soundfile.read(speech_path) enh_speech = self.separate_speech(speech[None, ...], fs=sr) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") if len(enh_speech) == 1: soundfile.write(audio_filename, enh_speech[0], samplerate=sr) else: # print("############") audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr) audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr) 
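            # Two separated sources: each was written to its own wav above, then
            # merge_audio() concatenates them into a single file for the chat UI.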
audio_filename = merge_audio(audio_filename_1, audio_filename_2) return audio_filename class ConversationBot: def __init__(self): print("Initializing AudioGPT") self.llm = OpenAI(temperature=0) self.t2i = T2I(device="cuda:1") self.i2t = ImageCaptioning(device="cuda:0") self.t2a = T2A(device="cuda:0") self.tts = TTS(device="cpu") self.t2s = T2S(device="cpu") self.i2a = I2A(device="cuda:0") self.a2t = A2T(device="cpu") self.asr = ASR(device="cuda:0") self.SE_SS_SC = Speech_Enh_SS_SC(device="cuda:0") # self.SE_SS_MC = Speech_Enh_SS_MC(device="cuda:0") self.SS = Speech_SS(device="cuda:0") self.inpaint = Inpaint(device="cuda:0") self.tts_ood = TTS_OOD(device="cpu") self.geneface = GeneFace(device="cuda:0") self.detection = SoundDetection(device="cpu") self.binaural = Binaural(device="cuda:0") self.extraction = SoundExtraction(device="cuda:0") self.TSD = TargetSoundDetection(device="cuda:0") self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output') def init_tools(self, interaction_type): if interaction_type == 'text': self.tools = [ Tool(name="Generate Image From User Input Text", func=self.t2i.inference, description="useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. " "The input to this tool should be a string, representing the text used to generate image. "), Tool(name="Get Photo Description", func=self.i2t.inference, description="useful for when you want to know what is inside the photo. receives image_path as input. " "The input to this tool should be a string, representing the image_path. "), Tool(name="Generate Audio From User Input Text", func=self.t2a.inference, description="useful for when you want to generate an audio from a user input text and it saved it to a file." "The input to this tool should be a string, representing the text used to generate audio."), Tool( name="Style Transfer", func= self.tts_ood.inference, description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice." "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx." "The input to this tool should be a comma seperated string of two, representing reference audio path and input text."), Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference, description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file." "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ." "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. " "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx." "The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."), Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference, description="useful for when you want to convert a user input text into speech audio it saved it to a file." 
"The input to this tool should be a string, representing the text used to be converted to speech."), # Tool(name="Speech Enhancement Or Separation In Single-Channel", func=self.SE_SS_SC.inference, # description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), " # "or separate each speech from the speech mixture (single-channel), receives audio_path as input." # "The input to this tool should be a string, representing the audio_path."), Tool(name="Speech Enhancement In Single-Channel", func=self.SE_SS_SC.inference, description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Speech Separation In Single-Channel", func=self.SS.inference, description="useful for when you want to separate each speech from the speech mixture, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), # Tool(name="Speech Enhancement In Multi-Channel", func=self.SE_SS_MC.inference, # description="useful for when you want to enhance the quality of the speech signal by reducing background noise (multi-channel), receives audio_path as input." # "The input to this tool should be a string, representing the audio_path."), Tool(name="Generate Audio From The Image", func=self.i2a.inference, description="useful for when you want to generate an audio based on an image." "The input to this tool should be a string, representing the image_path. "), Tool(name="Generate Text From The Audio", func=self.a2t.inference, description="useful for when you want to describe an audio in text, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Audio Inpainting", func=self.inpaint.show_mel_fn, description="useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, " "The input to this tool should be a string, representing the audio_path."), Tool(name="Transcribe Speech", func=self.asr.inference, description="useful for when you want to know the text corresponding to a human speech, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference, description="useful for when you want to generate a talking human portrait video given a input audio." "The input to this tool should be a string, representing the audio_path."), Tool(name="Detect The Sound Event From The Audio", func=self.detection.inference, description="useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. " "The input to this tool should be a string, representing the audio_path. "), Tool(name="Sythesize Binaural Audio From A Mono Audio Input", func=self.binaural.inference, description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. " "The input to this tool should be a string, representing the audio_path. 
"), Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference, description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. " "The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."), Tool(name="Target Sound Detection", func=self.TSD.inference, description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. " "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")] self.agent = initialize_agent( self.tools, self.llm, agent="conversational-react-description", verbose=True, memory=self.memory, return_intermediate_steps=True, agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, ) return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False) else: self.tools = [ Tool(name="Generate Audio From User Input Text", func=self.t2a.inference, description="useful for when you want to generate an audio from a user input text and it saved it to a file." "The input to this tool should be a string, representing the text used to generate audio."), Tool( name="Style Transfer", func= self.tts_ood.inference, description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice." "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx." "The input to this tool should be a comma seperated string of two, representing reference audio path and input text."), Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference, description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file." "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ." "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. " "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx." "The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."), Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference, description="useful for when you want to convert a user input text into speech audio it saved it to a file." "The input to this tool should be a string, representing the text used to be converted to speech."), Tool(name="Generate Text From The Audio", func=self.a2t.inference, description="useful for when you want to describe an audio in text, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference, description="useful for when you want to generate a talking human portrait video given a input audio." 
"The input to this tool should be a string, representing the audio_path."), Tool(name="Generate Binaural Audio From A Mono Audio Input", func=self.binaural.inference, description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. " "The input to this tool should be a string, representing the audio_path. "), Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference, description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. " "The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."), Tool(name="Target Sound Detection", func=self.TSD.inference, description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. " "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")] self.agent = initialize_agent( self.tools, self.llm, agent="conversational-react-description", verbose=True, memory=self.memory, return_intermediate_steps=True, agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, ) return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True) def run_text(self, text, state): print("===============Running run_text =============") print("Inputs:", text, state) print("======>Previous memory:\n %s" % self.agent.memory) self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500) res = self.agent({"input": text}) if res['intermediate_steps'] == []: print("======>Current memory:\n %s" % self.agent.memory) response = res['output'] state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) else: tool = res['intermediate_steps'][0][0].tool if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection": print("======>Current memory:\n %s" % self.agent.memory) response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output']) state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) elif tool == "Transcribe Speech": response = res['output'] state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) elif tool == "Detect The Sound Event From The Audio": image_filename = res['intermediate_steps'][0][1] response = res['output'] + f"![](/file={image_filename})*{image_filename}*" state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) elif tool == "Audio Inpainting": audio_filename = res['intermediate_steps'][0][0].tool_input image_filename = res['intermediate_steps'][0][1] print("======>Current 
memory:\n %s" % self.agent.memory) response = res['output'] state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(value=image_filename,visible=True), gr.Button.update(visible=True) elif tool == "Generate a talking human portrait video given a input Audio": video_filename = res['intermediate_steps'][0][1] print("======>Current memory:\n %s" % self.agent.memory) response = res['output'] state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(value=video_filename,visible=True), gr.Image.update(visible=False), gr.Button.update(visible=False) print("======>Current memory:\n %s" % self.agent.memory) response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output']) audio_filename = res['intermediate_steps'][0][1] state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) def run_image_or_audio(self, file, state, txt): file_type = file.name[-3:] if file_type == "wav": print("===============Running run_audio =============") print("Inputs:", file, state) print("======>Previous memory:\n %s" % self.agent.memory) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") # audio_load = whisper.load_audio(file.name) audio_load, sr = soundfile.read(file.name) soundfile.write(audio_filename, audio_load, samplerate = sr) description = self.a2t.inference(audio_filename) Human_prompt = "\nHuman: provide an audio named {}. The description is: {}. This information helps you to understand this audio, but you should use tools to finish following tasks, " \ "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(audio_filename, description) AI_prompt = "Received. " self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt print("======>Current memory:\n %s" % self.agent.memory) #state = state + [(f"<audio src=audio_filename controls=controls></audio>*{audio_filename}*", AI_prompt)] state = state + [(f"*{audio_filename}*", AI_prompt)] print("Outputs:", state) return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False) else: print("===============Running run_image =============") print("Inputs:", file, state) print("======>Previous memory:\n %s" % self.agent.memory) image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") print("======>Auto Resize Image...") img = Image.open(file.name) width, height = img.size ratio = min(512 / width, 512 / height) width_new, height_new = (round(width * ratio), round(height * ratio)) img = img.resize((width_new, height_new)) img = img.convert('RGB') img.save(image_filename, "PNG") print(f"Resize image form {width}x{height} to {width_new}x{height_new}") description = self.i2t.inference(image_filename) Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \ "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description) AI_prompt = "Received. 
" self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt print("======>Current memory:\n %s" % self.agent.memory) state = state + [(f"![](/file={image_filename})*{image_filename}*", AI_prompt)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False) def speech(self, speech_input, state): input_audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") text = self.asr.translate_english(speech_input) print("Inputs:", text, state) print("======>Previous memory:\n %s" % self.agent.memory) self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500) res = self.agent({"input": text}) if res['intermediate_steps'] == []: print("======>Current memory:\n %s" % self.agent.memory) response = res['output'] output_audio_filename = self.tts.inference(response) state = state + [(text, response)] print("Outputs:", state) return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False) else: tool = res['intermediate_steps'][0][0].tool if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection": print("======>Current memory:\n %s" % self.agent.memory) response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output']) output_audio_filename = self.tts.inference(res['output']) state = state + [(text, response)] print("Outputs:", state) return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False) elif tool == "Transcribe Speech": print("======>Current memory:\n %s" % self.agent.memory) output_audio_filename = self.tts.inference(res['output']) response = res['output'] state = state + [(text, response)] print("Outputs:", state) return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False) elif tool == "Detect The Sound Event From The Audio": print("======>Current memory:\n %s" % self.agent.memory) image_filename = res['intermediate_steps'][0][1] output_audio_filename = self.tts.inference(res['output']) response = res['output'] + f"![](/file={image_filename})*{image_filename}*" state = state + [(text, response)] print("Outputs:", state) return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False) elif tool == "Generate a talking human portrait video given a input Audio": video_filename = res['intermediate_steps'][0][1] print("======>Current memory:\n %s" % self.agent.memory) response = res['output'] output_audio_filename = self.tts.inference(res['output']) state = state + [(text, response)] print("Outputs:", state) return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(value=video_filename,visible=True) print("======>Current memory:\n %s" % self.agent.memory) response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output']) audio_filename = res['intermediate_steps'][0][1] Res = "The audio file has been generated and the audio is " output_audio_filename = merge_audio(self.tts.inference(Res), audio_filename) print(output_audio_filename) state = state + [(text, response)] response = res['output'] print("Outputs:", state) return gr.Audio.update(value=None), 
gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False) def inpainting(self, state, audio_filename, image_filename): print("===============Running inpainting =============") print("Inputs:", state) print("======>Previous memory:\n %s" % self.agent.memory) new_image_filename, new_audio_filename = self.inpaint.inference(audio_filename, image_filename) AI_prompt = "Here are the predict audio and the mel spectrum." + f"*{new_audio_filename}*" + f"![](/file={new_image_filename})*{new_image_filename}*" output_audio_filename = self.tts.inference(AI_prompt) self.agent.memory.buffer = self.agent.memory.buffer + 'AI: ' + AI_prompt print("======>Current memory:\n %s" % self.agent.memory) state = state + [(f"Audio Inpainting", AI_prompt)] print("Outputs:", state) return state, state, gr.Image.update(visible=False), gr.Audio.update(value=new_audio_filename, visible=True), gr.Video.update(visible=False), gr.Button.update(visible=False) def clear_audio(self): return gr.Audio.update(value=None, visible=False) def clear_input_audio(self): return gr.Audio.update(value=None) def clear_image(self): return gr.Image.update(value=None, visible=False) def clear_video(self): return gr.Video.update(value=None, visible=False) def clear_button(self): return gr.Button.update(visible=False) if __name__ == '__main__': bot = ConversationBot() with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo: with gr.Row(): gr.Markdown("## AudioGPT") chatbot = gr.Chatbot(elem_id="chatbot", label="AudioGPT", visible=False) state = gr.State([]) with gr.Row() as select_raws: with gr.Column(scale=0.7): interaction_type = gr.Radio(choices=['text', 'speech'], value='text', label='Interaction Type') with gr.Column(scale=0.3, min_width=0): select = gr.Button("Select") with gr.Row(visible=False) as text_input_raws: with gr.Column(scale=0.7): txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False) with gr.Column(scale=0.1, min_width=0): run = gr.Button("🏃‍♂️Run") with gr.Column(scale=0.1, min_width=0): clear_txt = gr.Button("🔄Clear️") with gr.Column(scale=0.1, min_width=0): btn = gr.UploadButton("🖼️Upload", file_types=["image","audio"]) with gr.Row(): outaudio = gr.Audio(visible=False) with gr.Row(): with gr.Column(scale=0.3, min_width=0): outvideo = gr.Video(visible=False) with gr.Row(): show_mel = gr.Image(type="filepath",tool='sketch',visible=False) with gr.Row(): run_button = gr.Button("Predict Masked Place",visible=False) with gr.Row(visible=False) as speech_input_raws: with gr.Column(scale=0.7): speech_input = gr.Audio(source="microphone", type="filepath", label="Input") with gr.Column(scale=0.15, min_width=0): submit_btn = gr.Button("🏃‍♂️Submit") with gr.Column(scale=0.15, min_width=0): clear_speech = gr.Button("🔄Clear️") with gr.Row(): speech_output = gr.Audio(label="Output",visible=False) select.click(bot.init_tools, [interaction_type], [chatbot, select_raws, text_input_raws, speech_input_raws]) txt.submit(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button]) txt.submit(lambda: "", None, txt) run.click(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button]) run.click(lambda: "", None, txt) btn.upload(bot.run_image_or_audio, [btn, state, txt], [chatbot, state, outaudio, outvideo]) run_button.click(bot.inpainting, [state, outaudio, show_mel], [chatbot, state, show_mel, outaudio, outvideo, run_button]) clear_txt.click(bot.memory.clear) 
clear_txt.click(lambda: [], None, chatbot) clear_txt.click(lambda: [], None, state) clear_txt.click(lambda:None, None, txt) clear_txt.click(bot.clear_button, None, run_button) clear_txt.click(bot.clear_image, None, show_mel) clear_txt.click(bot.clear_audio, None, outaudio) clear_txt.click(bot.clear_video, None, outvideo) submit_btn.click(bot.speech, [speech_input, state], [speech_input, speech_output, state, outvideo]) clear_speech.click(bot.clear_input_audio, None, speech_input) clear_speech.click(bot.clear_audio, None, speech_output) clear_speech.click(lambda: [], None, state) clear_speech.click(bot.clear_video, None, outvideo) demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
[ "langchain.llms.openai.OpenAI", "langchain.chains.conversation.memory.ConversationBufferMemory", "langchain.agents.initialize.initialize_agent", "langchain.agents.tools.Tool" ]
[((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'), ((4131, 4155), 'numpy.hstack', 'np.hstack', (['merged_signal'], {}), '(merged_signal)\n', (4140, 4155), True, 'import numpy as np\n'), ((4176, 4217), 'numpy.asarray', 'np.asarray', (['merged_signal'], {'dtype': 'np.int16'}), '(merged_signal, dtype=np.int16)\n', (4186, 4217), True, 'import numpy as np\n'), ((4298, 4348), 'scipy.io.wavfile.write', 'wavfile.write', (['audio_filename', 'sr_2', 'merged_signal'], {}), '(audio_filename, sr_2, merged_signal)\n', (4311, 4348), True, 'import scipy.io.wavfile as wavfile\n'), ((53, 79), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((4682, 4786), 'diffusers.StableDiffusionPipeline.from_pretrained', 'StableDiffusionPipeline.from_pretrained', (['"""runwayml/stable-diffusion-v1-5"""'], {'torch_dtype': 'torch.float16'}), "('runwayml/stable-diffusion-v1-5',\n torch_dtype=torch.float16)\n", (4721, 4786), False, 'from diffusers import StableDiffusionPipeline\n'), ((4820, 4892), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4849, 4892), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((4926, 5005), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4962, 5005), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((5043, 5163), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'self.text_refine_model', 'tokenizer': 'self.text_refine_tokenizer', 'device': 'self.device'}), "('text-generation', model=self.text_refine_model, tokenizer=self.\n text_refine_tokenizer, device=self.device)\n", (5051, 5163), False, 'from transformers import pipeline\n'), ((5875, 5945), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (5904, 5945), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6704, 6792), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n device=device)\n", (6718, 6792), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((6861, 6883), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (6875, 6883), False, 'from omegaconf import OmegaConf\n'), ((6900, 6937), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (6923, 6937), False, 'from ldm.util import instantiate_from_config\n'), ((7187, 7205), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (7198, 7205), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((7378, 7405), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7399, 7405), True, 'import numpy as np\n'), ((8501, 8560), 'torch.clamp', 
'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (8512, 8560), False, 'import torch\n'), ((10157, 10217), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (10172, 10217), False, 'import soundfile\n'), ((10695, 10783), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n device=device)\n", (10709, 10783), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((10852, 10874), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (10866, 10874), False, 'from omegaconf import OmegaConf\n'), ((10891, 10928), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (10914, 10928), False, 'from ldm.util import instantiate_from_config\n'), ((11178, 11196), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (11189, 11196), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((11399, 11426), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (11420, 11426), True, 'import numpy as np\n'), ((11759, 11776), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (11769, 11776), False, 'from PIL import Image\n'), ((12720, 12779), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (12731, 12779), False, 'import torch\n'), ((13377, 13437), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (13392, 13437), False, 'import soundfile\n'), ((13967, 13996), 'inference.tts.PortaSpeech.TTSInference', 'TTSInference', (['self.hp', 'device'], {}), '(self.hp, device)\n', (13979, 13996), False, 'from inference.tts.PortaSpeech import TTSInference\n'), ((14039, 14095), 'utils.hparams.set_hparams', 'set_hparams', ([], {'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(exp_name=self.exp_name, print_hparams=False)\n', (14050, 14095), False, 'from utils.hparams import set_hparams\n'), ((14345, 14399), 'soundfile.write', 'soundfile.write', (['audio_filename', 'out'], {'samplerate': '(22050)'}), '(audio_filename, out, samplerate=22050)\n', (14360, 14399), False, 'import soundfile\n'), ((14913, 14948), 'inference.svs.ds_e2e.DiffSingerE2EInfer', 'DiffSingerE2EInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (14931, 14948), False, 'from inference.svs.ds_e2e import DiffSingerE2EInfer\n'), ((15398, 15474), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (15409, 15474), False, 'from utils.hparams import set_hparams\n'), ((18112, 18174), 'soundfile.write', 'soundfile.write', (['audio_filename', 'wav'], {'samplerate': 'self.model.fs'}), '(audio_filename, wav, samplerate=self.model.fs)\n', (18127, 18174), False, 'import soundfile\n'), ((18678, 18711), 'inference.tts.GenerSpeech.GenerSpeechInfer', 'GenerSpeechInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (18694, 18711), False, 'from inference.tts.GenerSpeech import GenerSpeechInfer\n'), 
((18754, 18830), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (18765, 18830), False, 'from utils.hparams import set_hparams\n'), ((18914, 18941), 'os.path.exists', 'os.path.exists', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18928, 18941), False, 'import os\n'), ((20145, 20233), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n device=device)\n", (20159, 20233), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((20354, 20376), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (20368, 20376), False, 'from omegaconf import OmegaConf\n'), ((20393, 20430), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (20416, 20430), False, 'from ldm.util import instantiate_from_config\n'), ((20737, 20755), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (20748, 20755), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((21544, 21574), 'scipy.io.wavfile.read', 'wavfile.read', (['input_audio_path'], {}), '(input_audio_path)\n', (21556, 21574), True, 'import scipy.io.wavfile as wavfile\n'), ((21855, 21915), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (21871, 21915), False, 'import librosa\n'), ((22182, 22209), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22198, 22209), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((22619, 22679), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (22635, 22679), False, 'import librosa\n'), ((22945, 22972), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22961, 22972), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((23512, 23539), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (23533, 23539), True, 'import numpy as np\n'), ((23834, 23899), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (["batch['mask']"], {'size': 'c.shape[-2:]'}), "(batch['mask'], size=c.shape[-2:])\n", (23865, 23899), False, 'import torch\n'), ((23960, 23985), 'torch.cat', 'torch.cat', (['(c, cc)'], {'dim': '(1)'}), '((c, cc), dim=1)\n', (23969, 23985), False, 'import torch\n'), ((24438, 24495), 'torch.clamp', 'torch.clamp', (["((batch['mel'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mel'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24449, 24495), False, 'import torch\n'), ((24506, 24564), 'torch.clamp', 'torch.clamp', (["((batch['mask'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mask'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24517, 24564), False, 'import torch\n'), ((24584, 24643), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (24595, 24643), False, 'import torch\n'), ((24954, 24983), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', 
(24976, 24983), False, 'import torch\n'), ((25002, 25035), 'PIL.Image.open', 'Image.open', (["mel_and_mask['image']"], {}), "(mel_and_mask['image'])\n", (25012, 25035), False, 'from PIL import Image\n'), ((25055, 25087), 'PIL.Image.open', 'Image.open', (["mel_and_mask['mask']"], {}), "(mel_and_mask['mask'])\n", (25065, 25087), False, 'from PIL import Image\n'), ((25306, 25398), 'numpy.pad', 'np.pad', (['mask', '((0, 0), (0, mel_len - mask.shape[1]))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(mask, ((0, 0), (0, mel_len - mask.shape[1])), mode='constant',\n constant_values=0)\n", (25312, 25398), True, 'import numpy as np\n'), ((26269, 26327), 'soundfile.write', 'soundfile.write', (['audio_filename', 'gen_wav'], {'samplerate': '(16000)'}), '(audio_filename, gen_wav, samplerate=16000)\n', (26284, 26327), False, 'import soundfile\n'), ((26527, 26568), 'whisper.load_model', 'whisper.load_model', (['"""base"""'], {'device': 'device'}), "('base', device=device)\n", (26545, 26568), False, 'import whisper\n'), ((26623, 26653), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (26641, 26653), False, 'import whisper\n'), ((26670, 26696), 'whisper.pad_or_trim', 'whisper.pad_or_trim', (['audio'], {}), '(audio)\n', (26689, 26696), False, 'import whisper\n'), ((26831, 26856), 'whisper.DecodingOptions', 'whisper.DecodingOptions', ([], {}), '()\n', (26854, 26856), False, 'import whisper\n'), ((26874, 26914), 'whisper.decode', 'whisper.decode', (['self.model', 'mel', 'options'], {}), '(self.model, mel, options)\n', (26888, 26914), False, 'import whisper\n'), ((27312, 27373), 'audio_to_text.inference_waveform.AudioCapModel', 'AudioCapModel', (['"""audio_to_text/audiocaps_cntrstv_cnn14rnn_trm"""'], {}), "('audio_to_text/audiocaps_cntrstv_cnn14rnn_trm')\n", (27325, 27373), False, 'from audio_to_text.inference_waveform import AudioCapModel\n'), ((27427, 27457), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (27445, 27457), False, 'import whisper\n'), ((27863, 27884), 'audio_to_face.GeneFace_binding.GeneFaceInfer', 'GeneFaceInfer', (['device'], {}), '(device)\n', (27876, 27884), False, 'from audio_to_face.GeneFace_binding import GeneFaceInfer\n'), ((29085, 29267), 'audio_infer.pytorch.models.PVT', 'PVT', ([], {'sample_rate': 'self.sample_rate', 'window_size': 'self.window_size', 'hop_size': 'self.hop_size', 'mel_bins': 'self.mel_bins', 'fmin': 'self.fmin', 'fmax': 'self.fmax', 'classes_num': 'self.classes_num'}), '(sample_rate=self.sample_rate, window_size=self.window_size, hop_size=\n self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax,\n classes_num=self.classes_num)\n', (29088, 29267), False, 'from audio_infer.pytorch.models import PVT\n'), ((29306, 29364), 'torch.load', 'torch.load', (['self.checkpoint_path'], {'map_location': 'self.device'}), '(self.checkpoint_path, map_location=self.device)\n', (29316, 29364), False, 'import torch\n'), ((29531, 29592), 'librosa.core.load', 'librosa.core.load', (['audio_path'], {'sr': 'self.sample_rate', 'mono': '(True)'}), '(audio_path, sr=self.sample_rate, mono=True)\n', (29548, 29592), False, 'import librosa\n'), ((29672, 29698), 'torch.from_numpy', 'torch.from_numpy', (['waveform'], {}), '(waveform)\n', (29688, 29698), False, 'import torch\n'), ((30663, 30711), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 4)'}), '(2, 1, sharex=True, figsize=(10, 4))\n', (30675, 30711), True, 'import matplotlib.pyplot as plt\n'), 
((31471, 31489), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (31487, 31489), True, 'import matplotlib.pyplot as plt\n'), ((31578, 31605), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image_filename'], {}), '(image_filename)\n', (31589, 31605), True, 'import matplotlib.pyplot as plt\n'), ((31952, 31958), 'sound_extraction.utils.stft.STFT', 'STFT', ([], {}), '()\n', (31956, 31958), False, 'from sound_extraction.utils.stft import STFT\n'), ((32046, 32073), 'torch.load', 'torch.load', (['self.model_file'], {}), '(self.model_file)\n', (32056, 32073), False, 'import torch\n'), ((32416, 32436), 'sound_extraction.utils.wav_io.load_wav', 'load_wav', (['audio_path'], {}), '(audio_path)\n', (32424, 32436), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33156, 33189), 'sound_extraction.utils.wav_io.save_wav', 'save_wav', (['est_wav', 'audio_filename'], {}), '(est_wav, audio_filename)\n', (33164, 33189), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33854, 33920), 'src.models.BinauralNetwork', 'BinauralNetwork', ([], {'view_dim': '(7)', 'warpnet_layers': '(4)', 'warpnet_channels': '(64)'}), '(view_dim=7, warpnet_layers=4, warpnet_channels=64)\n', (33869, 33920), False, 'from src.models import BinauralNetwork\n'), ((34119, 34171), 'librosa.load', 'librosa.load', ([], {'path': 'audio_path', 'sr': 'self.sr', 'mono': '(True)'}), '(path=audio_path, sr=self.sr, mono=True)\n', (34131, 34171), False, 'import librosa\n'), ((34187, 34209), 'torch.from_numpy', 'torch.from_numpy', (['mono'], {}), '(mono)\n', (34203, 34209), False, 'import torch\n'), ((34311, 34331), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (34325, 34331), False, 'import random\n'), ((34433, 34455), 'torch.from_numpy', 'torch.from_numpy', (['view'], {}), '(view)\n', (34449, 34455), False, 'import torch\n'), ((35860, 35918), 'torch.cat', 'torch.cat', (["[chunk['binaural'] for chunk in chunks]"], {'dim': '(-1)'}), "([chunk['binaural'] for chunk in chunks], dim=-1)\n", (35869, 35918), False, 'import torch\n'), ((36151, 36196), 'torchaudio.save', 'torchaudio.save', (['audio_filename', 'binaural', 'sr'], {}), '(audio_filename, binaural, sr)\n', (36166, 36196), False, 'import torchaudio\n'), ((36806, 36819), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (36816, 36819), True, 'import numpy as np\n'), ((36849, 36890), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'self.device'}), "('ViT-B/32', device=self.device)\n", (36858, 36890), False, 'import clip\n'), ((37034, 37147), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth"""'], {'map_location': '"""cpu"""'}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth',\n map_location='cpu')\n", (37044, 37147), False, 'import torch\n'), ((37460, 37610), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'\n , map_location=lambda storage, loc: storage)\n", (37470, 37610), False, 'import torch\n'), ((38016, 38103), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth"""'], {}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')\n", (38026, 38103), False, 'import torch\n'), ((38122, 38208), 'torch.load', 
'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth"""'], {}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')\n", (38132, 38208), False, 'import torch\n'), ((38290, 38321), 'soundfile.read', 'sf.read', (['fname'], {'dtype': '"""float32"""'}), "(fname, dtype='float32')\n", (38297, 38321), True, 'import soundfile as sf\n'), ((38439, 38469), 'librosa.resample', 'librosa.resample', (['y', 'sr', '(22050)'], {}), '(y, sr, 22050)\n', (38455, 38469), False, 'import librosa\n'), ((39559, 39586), 'torch.from_numpy', 'torch.from_numpy', (['embedding'], {}), '(embedding)\n', (39575, 39586), False, 'import torch\n'), ((39796, 39820), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (39812, 39820), False, 'import torch\n'), ((40169, 40218), 'target_sound_detection.src.utils.median_filter', 'median_filter', (['pred'], {'window_size': '(1)', 'threshold': '(0.5)'}), '(pred, window_size=1, threshold=0.5)\n', (40182, 40218), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((45032, 45049), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (45047, 45049), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((45135, 45379), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n 'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n True, device=self.device)\n", (45149, 45379), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((45613, 45640), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (45627, 45640), False, 'import soundfile\n'), ((47031, 47048), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (47046, 47048), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((47134, 47378), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n 'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n True, device=self.device)\n", (47148, 47378), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((47597, 47624), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (47611, 47624), False, 'import soundfile\n'), ((48487, 48508), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (48493, 48508), False, 'from langchain.llms.openai import OpenAI\n'), ((49380, 49452), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (49404, 49452), False, 'from 
langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((75007, 75049), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75022, 75049), True, 'import gradio as gr\n'), ((75098, 75125), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (75113, 75125), True, 'import gradio as gr\n'), ((75168, 75210), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75183, 75210), True, 'import gradio as gr\n'), ((75253, 75295), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75268, 75295), True, 'import gradio as gr\n'), ((75339, 75370), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (75355, 75370), True, 'import gradio as gr\n'), ((75437, 75493), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot .overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (75446, 75493), True, 'import gradio as gr\n'), ((75583, 75645), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""AudioGPT"""', 'visible': '(False)'}), "(elem_id='chatbot', label='AudioGPT', visible=False)\n", (75593, 75645), True, 'import gradio as gr\n'), ((75663, 75675), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (75671, 75675), True, 'import gradio as gr\n'), ((130, 156), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (146, 156), False, 'import os\n'), ((205, 231), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'import os\n'), ((293, 319), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (309, 319), False, 'import os\n'), ((399, 425), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (415, 425), False, 'import os\n'), ((493, 519), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (509, 519), False, 'import os\n'), ((9915, 9930), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9928, 9930), False, 'import torch\n'), ((13139, 13154), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13152, 13154), False, 'import torch\n'), ((18985, 19005), 'numpy.load', 'np.load', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18992, 19005), True, 'import numpy as np\n'), ((20568, 20593), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (20591, 20593), False, 'import torch\n'), ((20544, 20564), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (20556, 20564), False, 'import torch\n'), ((20599, 20618), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (20611, 20618), False, 'import torch\n'), ((21770, 21796), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (21785, 21796), False, 'import librosa\n'), ((22054, 22113), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), '(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22060, 22113), True, 'import numpy as np\n'), ((22534, 22560), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (22549, 22560), False, 'import librosa\n'), ((22818, 22877), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), '(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22824, 22877), True, 'import 
numpy as np\n'), ((25442, 25457), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25455, 25457), False, 'import torch\n'), ((27988, 28016), 'os.path.basename', 'os.path.basename', (['audio_path'], {}), '(audio_path)\n', (28004, 28016), False, 'import os\n'), ((29774, 29789), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29787, 29789), False, 'import torch\n'), ((31019, 31067), 'numpy.arange', 'np.arange', (['(0)', 'frames_num', 'self.frames_per_second'], {}), '(0, frames_num, self.frames_per_second)\n', (31028, 31067), True, 'import numpy as np\n'), ((31105, 31154), 'numpy.arange', 'np.arange', (['(0)', '(frames_num / self.frames_per_second)'], {}), '(0, frames_num / self.frames_per_second)\n', (31114, 31154), True, 'import numpy as np\n'), ((31187, 31206), 'numpy.arange', 'np.arange', (['(0)', 'top_k'], {}), '(0, top_k)\n', (31196, 31206), True, 'import numpy as np\n'), ((40409, 40472), 'target_sound_detection.src.utils.decode_with_timestamps', 'decode_with_timestamps', (['target_event', 'filtered_pred[index_k, :]'], {}), '(target_event, filtered_pred[index_k, :])\n', (40431, 40472), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((47818, 47879), 'soundfile.write', 'soundfile.write', (['audio_filename', 'enh_speech[0]'], {'samplerate': 'sr'}), '(audio_filename, enh_speech[0], samplerate=sr)\n', (47833, 47879), False, 'import soundfile\n'), ((57651, 57951), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix':\n AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (57667, 57951), False, 'from langchain.agents.initialize import initialize_agent\n'), ((62445, 62745), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix':\n AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (62461, 62745), False, 'from langchain.agents.initialize import initialize_agent\n'), ((67323, 67348), 'soundfile.read', 'soundfile.read', (['file.name'], {}), '(file.name)\n', (67337, 67348), False, 'import soundfile\n'), ((67361, 67419), 'soundfile.write', 'soundfile.write', (['audio_filename', 'audio_load'], {'samplerate': 'sr'}), '(audio_filename, audio_load, samplerate=sr)\n', (67376, 67419), False, 'import soundfile\n'), ((68723, 68744), 'PIL.Image.open', 'Image.open', (['file.name'], {}), '(file.name)\n', (68733, 68744), False, 'from PIL import Image\n'), ((74811, 74841), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74826, 
74841), True, 'import gradio as gr\n'), ((74843, 74898), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'new_audio_filename', 'visible': '(True)'}), '(value=new_audio_filename, visible=True)\n', (74858, 74898), True, 'import gradio as gr\n'), ((74900, 74930), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74915, 74930), True, 'import gradio as gr\n'), ((74932, 74963), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74948, 74963), True, 'import gradio as gr\n'), ((75516, 75524), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75522, 75524), True, 'import gradio as gr\n'), ((75538, 75564), 'gradio.Markdown', 'gr.Markdown', (['"""## AudioGPT"""'], {}), "('## AudioGPT')\n", (75549, 75564), True, 'import gradio as gr\n'), ((75690, 75698), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75696, 75698), True, 'import gradio as gr\n'), ((75985, 76006), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (75991, 76006), True, 'import gradio as gr\n'), ((76544, 76552), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76550, 76552), True, 'import gradio as gr\n'), ((76577, 76600), 'gradio.Audio', 'gr.Audio', ([], {'visible': '(False)'}), '(visible=False)\n', (76585, 76600), True, 'import gradio as gr\n'), ((76614, 76622), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76620, 76622), True, 'import gradio as gr\n'), ((76740, 76748), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76746, 76748), True, 'import gradio as gr\n'), ((76773, 76828), 'gradio.Image', 'gr.Image', ([], {'type': '"""filepath"""', 'tool': '"""sketch"""', 'visible': '(False)'}), "(type='filepath', tool='sketch', visible=False)\n", (76781, 76828), True, 'import gradio as gr\n'), ((76840, 76848), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76846, 76848), True, 'import gradio as gr\n'), ((76875, 76923), 'gradio.Button', 'gr.Button', (['"""Predict Masked Place"""'], {'visible': '(False)'}), "('Predict Masked Place', visible=False)\n", (76884, 76923), True, 'import gradio as gr\n'), ((76945, 76966), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (76951, 76966), True, 'import gradio as gr\n'), ((5967, 6057), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "(\n 'Salesforce/blip-image-captioning-base')\n", (6011, 6057), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6968, 7004), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (6978, 7004), False, 'import torch\n'), ((7534, 7562), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (7550, 7562), False, 'import torch\n'), ((9137, 9162), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9160, 9162), False, 'import torch\n'), ((9661, 9681), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (9669, 9681), True, 'import numpy as np\n'), ((10959, 10995), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (10969, 10995), False, 'import torch\n'), ((11555, 11583), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (11571, 11583), False, 'import torch\n'), ((13731, 13756), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13754, 13756), False, 'import torch\n'), ((14598, 14623), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14621, 14623), False, 'import torch\n'), ((16575, 16600), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16598, 16600), False, 'import torch\n'), ((18374, 18399), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18397, 18399), False, 'import torch\n'), ((20461, 20497), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (20471, 20497), False, 'import torch\n'), ((23657, 23685), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (23673, 23685), False, 'import torch\n'), ((26711, 26745), 'whisper.log_mel_spectrogram', 'whisper.log_mel_spectrogram', (['audio'], {}), '(audio)\n', (26738, 26745), False, 'import whisper\n'), ((27767, 27792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27790, 27792), False, 'import torch\n'), ((30235, 30267), 'numpy.max', 'np.max', (['framewise_output'], {'axis': '(0)'}), '(framewise_output, axis=0)\n', (30241, 30267), True, 'import numpy as np\n'), ((30742, 30754), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (30748, 30754), True, 'import numpy as np\n'), ((31244, 31265), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (31252, 31265), True, 'import numpy as np\n'), ((32456, 32478), 'torch.tensor', 'torch.tensor', (['waveform'], {}), '(waveform)\n', (32468, 32478), False, 'import torch\n'), ((34703, 34725), 'random.randint', 'random.randint', (['(0)', 'm_a'], {}), '(0, m_a)\n', (34717, 34725), False, 'import random\n'), ((35521, 35536), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (35534, 35536), False, 'import torch\n'), ((35938, 35974), 'torch.clamp', 'torch.clamp', (['binaural'], {'min': '(-1)', 'max': '(1)'}), '(binaural, min=-1, max=1)\n', (35949, 35974), False, 'import torch\n'), ((38646, 38665), 'clip.tokenize', 'clip.tokenize', (['text'], {}), '(text)\n', (38659, 38665), False, 'import clip\n'), ((49580, 49968), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image From User Input Text"""', 'func': 'self.t2i.inference', 'description': '"""useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. """'}), "(name='Generate Image From User Input Text', func=self.t2i.inference,\n description=\n 'useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. '\n )\n", (49584, 49968), False, 'from langchain.agents.tools import Tool\n'), ((50029, 50275), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Get Photo Description"""', 'func': 'self.i2t.inference', 'description': '"""useful for when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. """'}), "(name='Get Photo Description', func=self.i2t.inference, description=\n 'useful for when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. 
'\n )\n", (50033, 50275), False, 'from langchain.agents.tools import Tool\n'), ((50340, 50626), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n description=\n 'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n )\n", (50344, 50626), False, 'from langchain.agents.tools import Tool\n'), ((50687, 51161), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n 'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n )\n", (50691, 51161), False, 'from langchain.agents.tools import Tool\n'), ((51281, 52061), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n func=self.t2s.inference, description=\n \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n )\n', (51285, 52061), False, 'from langchain.agents.tools import Tool\n'), ((52228, 52530), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n inference, description=\n 'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n )\n", (52232, 52530), False, 'from langchain.agents.tools import Tool\n'), ((53100, 53426), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Enhancement In Single-Channel"""', 'func': 'self.SE_SS_SC.inference', 'description': '"""useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Enhancement In Single-Channel', func=self.SE_SS_SC.\n inference, description=\n 'useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (53104, 53426), False, 'from langchain.agents.tools import Tool\n'), ((53486, 53762), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Separation In Single-Channel"""', 'func': 'self.SS.inference', 'description': '"""useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Separation In Single-Channel', func=self.SS.inference,\n description=\n 'useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (53490, 53762), False, 'from langchain.agents.tools import Tool\n'), ((54246, 54479), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From The Image"""', 'func': 'self.i2a.inference', 'description': '"""useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. """'}), "(name='Generate Audio From The Image', func=self.i2a.inference,\n description=\n 'useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. 
'\n )\n", (54250, 54479), False, 'from langchain.agents.tools import Tool\n'), ((54541, 54792), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n description=\n 'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (54545, 54792), False, 'from langchain.agents.tools import Tool\n'), ((54854, 55191), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Audio Inpainting"""', 'func': 'self.inpaint.show_mel_fn', 'description': '"""useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path."""'}), "(name='Audio Inpainting', func=self.inpaint.show_mel_fn, description=\n 'useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path.'\n )\n", (54858, 55191), False, 'from langchain.agents.tools import Tool\n'), ((55257, 55513), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Transcribe Speech"""', 'func': 'self.asr.inference', 'description': '"""useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Transcribe Speech', func=self.asr.inference, description=\n 'useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (55261, 55513), False, 'from langchain.agents.tools import Tool\n'), ((55578, 55869), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n func=self.geneface.inference, description=\n 'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n )\n", (55582, 55869), False, 'from langchain.agents.tools import Tool\n'), ((55930, 56296), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Detect The Sound Event From The Audio"""', 'func': 'self.detection.inference', 'description': '"""useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. 
"""'}), "(name='Detect The Sound Event From The Audio', func=self.detection.\n inference, description=\n 'useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n )\n", (55934, 56296), False, 'from langchain.agents.tools import Tool\n'), ((56356, 56654), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sythesize Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. """'}), "(name='Sythesize Binaural Audio From A Mono Audio Input', func=self.\n binaural.inference, description=\n 'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n )\n", (56360, 56654), False, 'from langchain.agents.tools import Tool\n'), ((56714, 57120), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n 'Extract Sound Event From Mixture Audio Based On Language Description',\n func=self.extraction.inference, description=\n 'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n )\n", (56718, 57120), False, 'from langchain.agents.tools import Tool\n'), ((57176, 57569), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n 'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n )\n", (57180, 57569), False, 'from langchain.agents.tools import Tool\n'), ((58070, 58093), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58079, 58093), True, 'import gradio as gr\n'), ((58095, 58119), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58104, 58119), True, 'import gradio as gr\n'), ((58121, 58144), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58130, 58144), True, 'import gradio as gr\n'), ((58146, 58170), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58155, 58170), True, 'import gradio as gr\n'), ((58228, 58514), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n description=\n 'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n )\n", (58232, 58514), False, 'from langchain.agents.tools import Tool\n'), ((58575, 59049), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n 'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n )\n", (58579, 59049), False, 'from langchain.agents.tools import Tool\n'), ((59169, 59949), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n func=self.t2s.inference, description=\n \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n )\n', (59173, 59949), False, 'from langchain.agents.tools import Tool\n'), ((60116, 60418), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n inference, description=\n 'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n )\n", (60120, 60418), False, 'from langchain.agents.tools import Tool\n'), ((60478, 60729), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n description=\n 'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (60482, 60729), False, 'from langchain.agents.tools import Tool\n'), ((60791, 61082), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n func=self.geneface.inference, description=\n 'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n )\n", (60795, 61082), False, 'from langchain.agents.tools import Tool\n'), ((61143, 61440), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. 
"""'}), "(name='Generate Binaural Audio From A Mono Audio Input', func=self.\n binaural.inference, description=\n 'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n )\n", (61147, 61440), False, 'from langchain.agents.tools import Tool\n'), ((61500, 61906), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n 'Extract Sound Event From Mixture Audio Based On Language Description',\n func=self.extraction.inference, description=\n 'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n )\n", (61504, 61906), False, 'from langchain.agents.tools import Tool\n'), ((61962, 62355), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n 'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n )\n", (61966, 62355), False, 'from langchain.agents.tools import Tool\n'), ((62864, 62888), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62873, 62888), True, 'import gradio as gr\n'), ((62890, 62914), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62899, 62914), True, 'import gradio as gr\n'), ((62916, 62940), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62925, 62940), True, 'import gradio as gr\n'), ((62942, 62965), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (62951, 62965), True, 'import gradio as gr\n'), ((63585, 63615), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63600, 63615), True, 'import gradio as gr\n'), ((63617, 63647), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63632, 63647), True, 'import gradio as gr\n'), ((63649, 63679), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63664, 63679), True, 'import gradio as gr\n'), ((63681, 63712), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63697, 63712), True, 'import gradio as gr\n'), ((66706, 66757), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (66721, 66757), True, 'import gradio as gr\n'), ((66758, 66788), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66773, 66788), True, 'import gradio as gr\n'), ((66790, 66820), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66805, 66820), True, 'import gradio as gr\n'), ((66822, 66853), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66838, 66853), True, 'import gradio as gr\n'), ((68295, 68346), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (68310, 68346), True, 'import gradio as gr\n'), ((68347, 68377), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (68362, 68377), True, 'import gradio as gr\n'), ((69917, 69947), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69932, 69947), True, 'import gradio as gr\n'), ((69949, 69979), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69964, 69979), True, 'import gradio as gr\n'), ((70735, 70762), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (70750, 70762), True, 'import gradio as gr\n'), ((70764, 70822), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (70779, 70822), True, 'import gradio as gr\n'), ((70830, 70860), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (70845, 70860), True, 'import gradio as gr\n'), ((73858, 73885), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73873, 73885), True, 'import gradio as gr\n'), ((73887, 73945), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73902, 73945), True, 'import gradio as gr\n'), 
((73953, 73983), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (73968, 73983), True, 'import gradio as gr\n'), ((75732, 75752), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (75741, 75752), True, 'import gradio as gr\n'), ((75789, 75865), 'gradio.Radio', 'gr.Radio', ([], {'choices': "['text', 'speech']", 'value': '"""text"""', 'label': '"""Interaction Type"""'}), "(choices=['text', 'speech'], value='text', label='Interaction Type')\n", (75797, 75865), True, 'import gradio as gr\n'), ((75883, 75916), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (75892, 75916), True, 'import gradio as gr\n'), ((75943, 75962), 'gradio.Button', 'gr.Button', (['"""Select"""'], {}), "('Select')\n", (75952, 75962), True, 'import gradio as gr\n'), ((76044, 76064), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (76053, 76064), True, 'import gradio as gr\n'), ((76219, 76252), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76228, 76252), True, 'import gradio as gr\n'), ((76276, 76301), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Run"""'], {}), "('🏃\\u200d♂️Run')\n", (76285, 76301), True, 'import gradio as gr\n'), ((76314, 76347), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76323, 76347), True, 'import gradio as gr\n'), ((76377, 76397), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (76386, 76397), True, 'import gradio as gr\n'), ((76415, 76448), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76424, 76448), True, 'import gradio as gr\n'), ((76472, 76530), 'gradio.UploadButton', 'gr.UploadButton', (['"""🖼️Upload"""'], {'file_types': "['image', 'audio']"}), "('🖼️Upload', file_types=['image', 'audio'])\n", (76487, 76530), True, 'import gradio as gr\n'), ((76641, 76674), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (76650, 76674), True, 'import gradio as gr\n'), ((76703, 76726), 'gradio.Video', 'gr.Video', ([], {'visible': '(False)'}), '(visible=False)\n', (76711, 76726), True, 'import gradio as gr\n'), ((77007, 77027), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (77016, 77027), True, 'import gradio as gr\n'), ((77060, 77121), 'gradio.Audio', 'gr.Audio', ([], {'source': '"""microphone"""', 'type': '"""filepath"""', 'label': '"""Input"""'}), "(source='microphone', type='filepath', label='Input')\n", (77068, 77121), True, 'import gradio as gr\n'), ((77139, 77173), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77148, 77173), True, 'import gradio as gr\n'), ((77204, 77232), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Submit"""'], {}), "('🏃\\u200d♂️Submit')\n", (77213, 77232), True, 'import gradio as gr\n'), ((77245, 77279), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77254, 77279), True, 'import gradio as gr\n'), ((77312, 77332), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (77321, 77332), True, 'import gradio as gr\n'), ((77350, 77358), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (77356, 77358), True, 'import gradio as gr\n'), ((77392, 77431), 'gradio.Audio', 'gr.Audio', ([], {'label': '"""Output"""', 'visible': 
'(False)'}), "(label='Output', visible=False)\n", (77400, 77431), True, 'import gradio as gr\n'), ((4265, 4277), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4275, 4277), False, 'import uuid\n'), ((6139, 6161), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6149, 6161), False, 'from PIL import Image\n'), ((20850, 20871), 'torch.from_numpy', 'torch.from_numpy', (['mel'], {}), '(mel)\n', (20866, 20871), False, 'import torch\n'), ((20926, 20948), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (20942, 20948), False, 'import torch\n'), ((31997, 32012), 'sound_extraction.model.LASSNet.LASSNet', 'LASSNet', (['device'], {}), '(device)\n', (32004, 32012), False, 'from sound_extraction.model.LASSNet import LASSNet\n'), ((38499, 38549), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y'], {}), '(y, **self.MEL_ARGS)\n', (38529, 38549), False, 'import librosa\n'), ((64244, 64274), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64259, 64274), True, 'import gradio as gr\n'), ((64276, 64306), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64291, 64306), True, 'import gradio as gr\n'), ((64308, 64338), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64323, 64338), True, 'import gradio as gr\n'), ((64340, 64371), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64356, 64371), True, 'import gradio as gr\n'), ((71452, 71479), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71467, 71479), True, 'import gradio as gr\n'), ((71481, 71539), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71496, 71539), True, 'import gradio as gr\n'), ((71547, 71577), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (71562, 71577), True, 'import gradio as gr\n'), ((5271, 5283), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5281, 5283), False, 'import uuid\n'), ((9377, 9399), 'torch.FloatTensor', 'torch.FloatTensor', (['wav'], {}), '(wav)\n', (9394, 9399), False, 'import torch\n'), ((10120, 10132), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10130, 10132), False, 'import uuid\n'), ((13340, 13352), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13350, 13352), False, 'import uuid\n'), ((14308, 14320), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14318, 14320), False, 'import uuid\n'), ((16178, 16190), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16188, 16190), False, 'import uuid\n'), ((18075, 18087), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18085, 18087), False, 'import uuid\n'), ((19487, 19499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19497, 19499), False, 'import uuid\n'), ((23290, 23302), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (23300, 23302), False, 'import uuid\n'), ((26117, 26129), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26127, 26129), False, 'import uuid\n'), ((26232, 26244), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26242, 26244), False, 'import uuid\n'), ((31541, 31553), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (31551, 31553), False, 'import uuid\n'), ((33070, 33082), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (33080, 33082), False, 'import uuid\n'), ((34346, 34386), 'numpy.loadtxt', 'np.loadtxt', (['self.position_file[rand_int]'], {}), 
'(self.position_file[rand_int])\n', (34356, 34386), True, 'import numpy as np\n'), ((36088, 36100), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (36098, 36100), False, 'import uuid\n'), ((45879, 45891), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (45889, 45891), False, 'import uuid\n'), ((47744, 47756), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47754, 47756), False, 'import uuid\n'), ((64588, 64618), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64603, 64618), True, 'import gradio as gr\n'), ((64620, 64650), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64635, 64650), True, 'import gradio as gr\n'), ((64652, 64682), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64667, 64682), True, 'import gradio as gr\n'), ((64684, 64715), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64700, 64715), True, 'import gradio as gr\n'), ((70081, 70093), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (70091, 70093), False, 'import uuid\n'), ((71927, 71954), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71942, 71954), True, 'import gradio as gr\n'), ((71956, 72014), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71971, 72014), True, 'import gradio as gr\n'), ((72022, 72052), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72037, 72052), True, 'import gradio as gr\n'), ((76088, 76183), 'gradio.Textbox', 'gr.Textbox', ([], {'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n 'Enter text and press enter, or upload an image')\n", (76098, 76183), True, 'import gradio as gr\n'), ((47987, 47999), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47997, 47999), False, 'import uuid\n'), ((48159, 48171), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (48169, 48171), False, 'import uuid\n'), ((65068, 65098), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65083, 65098), True, 'import gradio as gr\n'), ((65100, 65130), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65115, 65130), True, 'import gradio as gr\n'), ((65132, 65162), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65147, 65162), True, 'import gradio as gr\n'), ((65164, 65195), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65180, 65195), True, 'import gradio as gr\n'), ((67208, 67220), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (67218, 67220), False, 'import uuid\n'), ((68627, 68639), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (68637, 68639), False, 'import uuid\n'), ((72538, 72565), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (72553, 72565), True, 'import gradio as gr\n'), ((72567, 72625), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (72582, 72625), True, 'import gradio as gr\n'), ((72633, 72663), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72648, 72663), True, 'import gradio as gr\n'), ((65632, 65683), 
'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (65647, 65683), True, 'import gradio as gr\n'), ((65684, 65714), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65699, 65714), True, 'import gradio as gr\n'), ((65716, 65767), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'image_filename', 'visible': '(True)'}), '(value=image_filename, visible=True)\n', (65731, 65767), True, 'import gradio as gr\n'), ((65768, 65798), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(True)'}), '(visible=True)\n', (65784, 65798), True, 'import gradio as gr\n'), ((73124, 73151), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73139, 73151), True, 'import gradio as gr\n'), ((73153, 73211), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73168, 73211), True, 'import gradio as gr\n'), ((73219, 73270), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (73234, 73270), True, 'import gradio as gr\n'), ((66196, 66226), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66211, 66226), True, 'import gradio as gr\n'), ((66228, 66279), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (66243, 66279), True, 'import gradio as gr\n'), ((66280, 66310), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66295, 66310), True, 'import gradio as gr\n'), ((66312, 66343), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66328, 66343), True, 'import gradio as gr\n')]
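The record above ends with a series of Tool(...) constructions that bind audio-model inference methods to natural-language descriptions. For reference, a minimal sketch of that same pattern with a stand-in callable is shown below; echo_audio_path is hypothetical and not part of the original demo.

from langchain.agents.tools import Tool

def echo_audio_path(audio_path: str) -> str:
    # Stand-in for a real inference method such as self.t2a.inference in the record above.
    return f"received audio at {audio_path}"

demo_tool = Tool(
    name="Echo Audio Path",
    func=echo_audio_path,
    description=(
        "useful for demonstrating the Tool interface. "
        "The input to this tool should be a string representing the audio_path."
    ),
)
print(demo_tool.run("output/demo.wav"))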
from langchain.indexes import VectorstoreIndexCreator from langchain_community.document_loaders import CSVLoader from langchain_community.vectorstores import FAISS loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv") docs = loader.load() index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS) index = index_creator.from_documents(docs) index.vectorstore.save_local("titanic_data")
[ "langchain.indexes.VectorstoreIndexCreator", "langchain_community.document_loaders.CSVLoader" ]
[((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {'vectorstore_cls': 'FAISS'}), '(vectorstore_cls=FAISS)\n', (291, 314), False, 'from langchain.indexes import VectorstoreIndexCreator\n')]
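The snippet above builds and persists a FAISS index from the titanic CSV; reading it back in a later session could look roughly like the sketch below. The embedding class mirrors VectorstoreIndexCreator's default (an OpenAI API key is assumed to be configured), and the allow_dangerous_deserialization flag is only needed on newer langchain-community releases, so treat both as assumptions about the installed version.

from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS

# Reload the index written by save_local; supply the same embedding model used at build time.
vectorstore = FAISS.load_local(
    "titanic_data",
    OpenAIEmbeddings(),
    allow_dangerous_deserialization=True,  # required by recent langchain-community versions (assumption)
)
for doc in vectorstore.similarity_search("How many passengers survived?", k=4):
    print(doc.page_content[:100])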
# ruff: noqa: E402 """Main entrypoint into package.""" import warnings from importlib import metadata from typing import Any, Optional from langchain_core._api.deprecation import surface_langchain_deprecation_warnings try: __version__ = metadata.version(__package__) except metadata.PackageNotFoundError: # Case where package metadata is not available. __version__ = "" del metadata # optional, avoids polluting the results of dir(__package__) def _warn_on_import(name: str, replacement: Optional[str] = None) -> None: """Warn on import of deprecated module.""" from langchain.utils.interactive_env import is_interactive_env if is_interactive_env(): # No warnings for interactive environments. # This is done to avoid polluting the output of interactive environments # where users rely on auto-complete and may trigger this warning # even if they are not using any deprecated modules return if replacement: warnings.warn( f"Importing {name} from langchain root module is no longer supported. " f"Please use {replacement} instead." ) else: warnings.warn( f"Importing {name} from langchain root module is no longer supported." ) # Surfaces Deprecation and Pending Deprecation warnings from langchain. surface_langchain_deprecation_warnings() def __getattr__(name: str) -> Any: if name == "MRKLChain": from langchain.agents import MRKLChain _warn_on_import(name, replacement="langchain.agents.MRKLChain") return MRKLChain elif name == "ReActChain": from langchain.agents import ReActChain _warn_on_import(name, replacement="langchain.agents.ReActChain") return ReActChain elif name == "SelfAskWithSearchChain": from langchain.agents import SelfAskWithSearchChain _warn_on_import(name, replacement="langchain.agents.SelfAskWithSearchChain") return SelfAskWithSearchChain elif name == "ConversationChain": from langchain.chains import ConversationChain _warn_on_import(name, replacement="langchain.chains.ConversationChain") return ConversationChain elif name == "LLMBashChain": raise ImportError( "This module has been moved to langchain-experimental. " "For more details: " "https://github.com/langchain-ai/langchain/discussions/11352." "To access this code, install it with `pip install langchain-experimental`." 
"`from langchain_experimental.llm_bash.base " "import LLMBashChain`" ) elif name == "LLMChain": from langchain.chains import LLMChain _warn_on_import(name, replacement="langchain.chains.LLMChain") return LLMChain elif name == "LLMCheckerChain": from langchain.chains import LLMCheckerChain _warn_on_import(name, replacement="langchain.chains.LLMCheckerChain") return LLMCheckerChain elif name == "LLMMathChain": from langchain.chains import LLMMathChain _warn_on_import(name, replacement="langchain.chains.LLMMathChain") return LLMMathChain elif name == "QAWithSourcesChain": from langchain.chains import QAWithSourcesChain _warn_on_import(name, replacement="langchain.chains.QAWithSourcesChain") return QAWithSourcesChain elif name == "VectorDBQA": from langchain.chains import VectorDBQA _warn_on_import(name, replacement="langchain.chains.VectorDBQA") return VectorDBQA elif name == "VectorDBQAWithSourcesChain": from langchain.chains import VectorDBQAWithSourcesChain _warn_on_import(name, replacement="langchain.chains.VectorDBQAWithSourcesChain") return VectorDBQAWithSourcesChain elif name == "InMemoryDocstore": from langchain.docstore import InMemoryDocstore _warn_on_import(name, replacement="langchain.docstore.InMemoryDocstore") return InMemoryDocstore elif name == "Wikipedia": from langchain.docstore import Wikipedia _warn_on_import(name, replacement="langchain.docstore.Wikipedia") return Wikipedia elif name == "Anthropic": from langchain_community.llms import Anthropic _warn_on_import(name, replacement="langchain_community.llms.Anthropic") return Anthropic elif name == "Banana": from langchain_community.llms import Banana _warn_on_import(name, replacement="langchain_community.llms.Banana") return Banana elif name == "CerebriumAI": from langchain_community.llms import CerebriumAI _warn_on_import(name, replacement="langchain_community.llms.CerebriumAI") return CerebriumAI elif name == "Cohere": from langchain_community.llms import Cohere _warn_on_import(name, replacement="langchain_community.llms.Cohere") return Cohere elif name == "ForefrontAI": from langchain_community.llms import ForefrontAI _warn_on_import(name, replacement="langchain_community.llms.ForefrontAI") return ForefrontAI elif name == "GooseAI": from langchain_community.llms import GooseAI _warn_on_import(name, replacement="langchain_community.llms.GooseAI") return GooseAI elif name == "HuggingFaceHub": from langchain_community.llms import HuggingFaceHub _warn_on_import(name, replacement="langchain_community.llms.HuggingFaceHub") return HuggingFaceHub elif name == "HuggingFaceTextGenInference": from langchain_community.llms import HuggingFaceTextGenInference _warn_on_import( name, replacement="langchain_community.llms.HuggingFaceTextGenInference" ) return HuggingFaceTextGenInference elif name == "LlamaCpp": from langchain_community.llms import LlamaCpp _warn_on_import(name, replacement="langchain_community.llms.LlamaCpp") return LlamaCpp elif name == "Modal": from langchain_community.llms import Modal _warn_on_import(name, replacement="langchain_community.llms.Modal") return Modal elif name == "OpenAI": from langchain_community.llms import OpenAI _warn_on_import(name, replacement="langchain_community.llms.OpenAI") return OpenAI elif name == "Petals": from langchain_community.llms import Petals _warn_on_import(name, replacement="langchain_community.llms.Petals") return Petals elif name == "PipelineAI": from langchain_community.llms import PipelineAI _warn_on_import(name, replacement="langchain_community.llms.PipelineAI") return 
PipelineAI elif name == "SagemakerEndpoint": from langchain_community.llms import SagemakerEndpoint _warn_on_import(name, replacement="langchain_community.llms.SagemakerEndpoint") return SagemakerEndpoint elif name == "StochasticAI": from langchain_community.llms import StochasticAI _warn_on_import(name, replacement="langchain_community.llms.StochasticAI") return StochasticAI elif name == "Writer": from langchain_community.llms import Writer _warn_on_import(name, replacement="langchain_community.llms.Writer") return Writer elif name == "HuggingFacePipeline": from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline _warn_on_import( name, replacement="langchain_community.llms.huggingface_pipeline.HuggingFacePipeline", ) return HuggingFacePipeline elif name == "FewShotPromptTemplate": from langchain_core.prompts import FewShotPromptTemplate _warn_on_import( name, replacement="langchain_core.prompts.FewShotPromptTemplate" ) return FewShotPromptTemplate elif name == "Prompt": from langchain_core.prompts import PromptTemplate _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate") # it's renamed as prompt template anyways # this is just for backwards compat return PromptTemplate elif name == "PromptTemplate": from langchain_core.prompts import PromptTemplate _warn_on_import(name, replacement="langchain_core.prompts.PromptTemplate") return PromptTemplate elif name == "BasePromptTemplate": from langchain_core.prompts import BasePromptTemplate _warn_on_import(name, replacement="langchain_core.prompts.BasePromptTemplate") return BasePromptTemplate elif name == "ArxivAPIWrapper": from langchain_community.utilities import ArxivAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.ArxivAPIWrapper" ) return ArxivAPIWrapper elif name == "GoldenQueryAPIWrapper": from langchain_community.utilities import GoldenQueryAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.GoldenQueryAPIWrapper" ) return GoldenQueryAPIWrapper elif name == "GoogleSearchAPIWrapper": from langchain_community.utilities import GoogleSearchAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.GoogleSearchAPIWrapper" ) return GoogleSearchAPIWrapper elif name == "GoogleSerperAPIWrapper": from langchain_community.utilities import GoogleSerperAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.GoogleSerperAPIWrapper" ) return GoogleSerperAPIWrapper elif name == "PowerBIDataset": from langchain_community.utilities import PowerBIDataset _warn_on_import( name, replacement="langchain_community.utilities.PowerBIDataset" ) return PowerBIDataset elif name == "SearxSearchWrapper": from langchain_community.utilities import SearxSearchWrapper _warn_on_import( name, replacement="langchain_community.utilities.SearxSearchWrapper" ) return SearxSearchWrapper elif name == "WikipediaAPIWrapper": from langchain_community.utilities import WikipediaAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.WikipediaAPIWrapper" ) return WikipediaAPIWrapper elif name == "WolframAlphaAPIWrapper": from langchain_community.utilities import WolframAlphaAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.WolframAlphaAPIWrapper" ) return WolframAlphaAPIWrapper elif name == "SQLDatabase": from langchain_community.utilities import SQLDatabase _warn_on_import(name, replacement="langchain_community.utilities.SQLDatabase") return SQLDatabase elif name == "FAISS": from 
langchain_community.vectorstores import FAISS _warn_on_import(name, replacement="langchain_community.vectorstores.FAISS") return FAISS elif name == "ElasticVectorSearch": from langchain_community.vectorstores import ElasticVectorSearch _warn_on_import( name, replacement="langchain_community.vectorstores.ElasticVectorSearch" ) return ElasticVectorSearch # For backwards compatibility elif name == "SerpAPIChain" or name == "SerpAPIWrapper": from langchain_community.utilities import SerpAPIWrapper _warn_on_import( name, replacement="langchain_community.utilities.SerpAPIWrapper" ) return SerpAPIWrapper elif name == "verbose": from langchain.globals import _verbose _warn_on_import( name, replacement=( "langchain.globals.set_verbose() / langchain.globals.get_verbose()" ), ) return _verbose elif name == "debug": from langchain.globals import _debug _warn_on_import( name, replacement=( "langchain.globals.set_debug() / langchain.globals.get_debug()" ), ) return _debug elif name == "llm_cache": from langchain.globals import _llm_cache _warn_on_import( name, replacement=( "langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()" ), ) return _llm_cache else: raise AttributeError(f"Could not find: {name}") __all__ = [ "LLMChain", "LLMCheckerChain", "LLMMathChain", "ArxivAPIWrapper", "GoldenQueryAPIWrapper", "SelfAskWithSearchChain", "SerpAPIWrapper", "SerpAPIChain", "SearxSearchWrapper", "GoogleSearchAPIWrapper", "GoogleSerperAPIWrapper", "WolframAlphaAPIWrapper", "WikipediaAPIWrapper", "Anthropic", "Banana", "CerebriumAI", "Cohere", "ForefrontAI", "GooseAI", "Modal", "OpenAI", "Petals", "PipelineAI", "StochasticAI", "Writer", "BasePromptTemplate", "Prompt", "FewShotPromptTemplate", "PromptTemplate", "ReActChain", "Wikipedia", "HuggingFaceHub", "SagemakerEndpoint", "HuggingFacePipeline", "SQLDatabase", "PowerBIDataset", "FAISS", "MRKLChain", "VectorDBQA", "ElasticVectorSearch", "InMemoryDocstore", "ConversationChain", "VectorDBQAWithSourcesChain", "QAWithSourcesChain", "LlamaCpp", "HuggingFaceTextGenInference", ]
[ "langchain.utils.interactive_env.is_interactive_env", "langchain_core._api.deprecation.surface_langchain_deprecation_warnings" ]
[((1348, 1388), 'langchain_core._api.deprecation.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (1386, 1388), False, 'from langchain_core._api.deprecation import surface_langchain_deprecation_warnings\n'), ((243, 272), 'importlib.metadata.version', 'metadata.version', (['__package__'], {}), '(__package__)\n', (259, 272), False, 'from importlib import metadata\n'), ((658, 678), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (676, 678), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((990, 1119), 'warnings.warn', 'warnings.warn', (['f"""Importing {name} from langchain root module is no longer supported. Please use {replacement} instead."""'], {}), "(\n f'Importing {name} from langchain root module is no longer supported. Please use {replacement} instead.'\n )\n", (1003, 1119), False, 'import warnings\n'), ((1166, 1256), 'warnings.warn', 'warnings.warn', (['f"""Importing {name} from langchain root module is no longer supported."""'], {}), "(\n f'Importing {name} from langchain root module is no longer supported.')\n", (1179, 1256), False, 'import warnings\n')]
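The module above combines PEP 562's module-level __getattr__ with _warn_on_import to keep old import paths working while emitting deprecation warnings. A stripped-down sketch of the same pattern for a hypothetical package (my_pkg and my_pkg.new_home are made-up names) is:

# my_pkg/__init__.py -- hypothetical package using the same lazy re-export pattern.
import warnings
from typing import Any

def __getattr__(name: str) -> Any:
    if name == "OldThing":
        # Import lazily so the heavy module is only loaded when the deprecated name is used.
        from my_pkg.new_home import OldThing

        warnings.warn(
            "Importing OldThing from my_pkg is deprecated; "
            "use `from my_pkg.new_home import OldThing` instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return OldThing
    raise AttributeError(f"module 'my_pkg' has no attribute {name!r}")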
from typing import Any, Dict, List, Type, Union from langchain_community.graphs import NetworkxEntityGraph from langchain_community.graphs.networkx_graph import ( KnowledgeTriple, get_entities, parse_triples, ) from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key class ConversationKGMemory(BaseChatMemory): """Knowledge graph conversation memory. Integrates with external knowledge graph to store and retrieve information about knowledge triples in the conversation. """ k: int = 2 human_prefix: str = "Human" ai_prefix: str = "AI" kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph) knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT llm: BaseLanguageModel summary_message_cls: Type[BaseMessage] = SystemMessage """Number of previous utterances to include in the context.""" memory_key: str = "history" #: :meta private: def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" entities = self._get_current_entities(inputs) summary_strings = [] for entity in entities: knowledge = self.kg.get_entity_knowledge(entity) if knowledge: summary = f"On {entity}: {'. '.join(knowledge)}." summary_strings.append(summary) context: Union[str, List] if not summary_strings: context = [] if self.return_messages else "" elif self.return_messages: context = [ self.summary_message_cls(content=text) for text in summary_strings ] else: context = "\n".join(summary_strings) return {self.memory_key: context} @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. 
:meta private: """ return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: """Get the input key for the prompt.""" if self.input_key is None: return get_prompt_input_key(inputs, self.memory_variables) return self.input_key def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str: """Get the output key for the prompt.""" if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") return list(outputs.keys())[0] return self.output_key def get_current_entities(self, input_string: str) -> List[str]: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, ) return get_entities(output) def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]: """Get the current entities in the conversation.""" prompt_input_key = self._get_prompt_input_key(inputs) return self.get_current_entities(inputs[prompt_input_key]) def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]: chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt) buffer_string = get_buffer_string( self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) output = chain.predict( history=buffer_string, input=input_string, verbose=True, ) knowledge = parse_triples(output) return knowledge def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None: """Get and update knowledge graph from the conversation history.""" prompt_input_key = self._get_prompt_input_key(inputs) knowledge = self.get_knowledge_triplets(inputs[prompt_input_key]) for triple in knowledge: self.kg.add_triple(triple) def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" super().save_context(inputs, outputs) self._get_and_update_kg(inputs) def clear(self) -> None: """Clear memory contents.""" super().clear() self.kg.clear()
[ "langchain_core.messages.get_buffer_string", "langchain.chains.llm.LLMChain", "langchain_community.graphs.networkx_graph.parse_triples", "langchain.memory.utils.get_prompt_input_key", "langchain_community.graphs.networkx_graph.get_entities", "langchain_core.pydantic_v1.Field" ]
[((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (3171, 3223), False, 'from langchain.chains.llm import LLMChain\n'), ((3248, 3369), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (3265, 3369), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((3537, 3557), 'langchain_community.graphs.networkx_graph.get_entities', 'get_entities', (['output'], {}), '(output)\n', (3549, 3557), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((3921, 3984), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.knowledge_extraction_prompt'}), '(llm=self.llm, prompt=self.knowledge_extraction_prompt)\n', (3929, 3984), False, 'from langchain.chains.llm import LLMChain\n'), ((4009, 4130), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (4026, 4130), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((4329, 4350), 'langchain_community.graphs.networkx_graph.parse_triples', 'parse_triples', (['output'], {}), '(output)\n', (4342, 4350), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((2649, 2700), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (2669, 2700), False, 'from langchain.memory.utils import get_prompt_input_key\n')]
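For orientation, a minimal way ConversationKGMemory is typically instantiated and exercised is sketched below; it assumes an OpenAI API key is configured, and any other BaseLanguageModel could be substituted for the OpenAI llm.

from langchain_community.llms import OpenAI
from langchain.memory import ConversationKGMemory

llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set in the environment
memory = ConversationKGMemory(llm=llm)

# save_context extracts knowledge triples from the exchange and stores them in the graph.
memory.save_context(
    {"input": "Sam is my coworker and he lives in Berlin."},
    {"output": "Got it, I'll remember that about Sam."},
)

# load_memory_variables pulls entity summaries relevant to the new input back out of the graph.
print(memory.load_memory_variables({"input": "Where does Sam live?"}))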
""" **LLM** classes provide access to the large language model (**LLM**) APIs and services. **Class hierarchy:** .. code-block:: BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI **Main helpers:** .. code-block:: LLMResult, PromptValue, CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, CallbackManager, AsyncCallbackManager, AIMessage, BaseMessage """ # noqa: E501 import warnings from typing import Any, Callable, Dict, Type from langchain_core._api import LangChainDeprecationWarning from langchain_core.language_models.llms import BaseLLM from langchain.utils.interactive_env import is_interactive_env def _import_ai21() -> Any: from langchain_community.llms.ai21 import AI21 return AI21 def _import_aleph_alpha() -> Any: from langchain_community.llms.aleph_alpha import AlephAlpha return AlephAlpha def _import_amazon_api_gateway() -> Any: from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway return AmazonAPIGateway def _import_anthropic() -> Any: from langchain_community.llms.anthropic import Anthropic return Anthropic def _import_anyscale() -> Any: from langchain_community.llms.anyscale import Anyscale return Anyscale def _import_arcee() -> Any: from langchain_community.llms.arcee import Arcee return Arcee def _import_aviary() -> Any: from langchain_community.llms.aviary import Aviary return Aviary def _import_azureml_endpoint() -> Any: from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint return AzureMLOnlineEndpoint def _import_baidu_qianfan_endpoint() -> Any: from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint return QianfanLLMEndpoint def _import_bananadev() -> Any: from langchain_community.llms.bananadev import Banana return Banana def _import_baseten() -> Any: from langchain_community.llms.baseten import Baseten return Baseten def _import_beam() -> Any: from langchain_community.llms.beam import Beam return Beam def _import_bedrock() -> Any: from langchain_community.llms.bedrock import Bedrock return Bedrock def _import_bittensor() -> Any: from langchain_community.llms.bittensor import NIBittensorLLM return NIBittensorLLM def _import_cerebriumai() -> Any: from langchain_community.llms.cerebriumai import CerebriumAI return CerebriumAI def _import_chatglm() -> Any: from langchain_community.llms.chatglm import ChatGLM return ChatGLM def _import_clarifai() -> Any: from langchain_community.llms.clarifai import Clarifai return Clarifai def _import_cohere() -> Any: from langchain_community.llms.cohere import Cohere return Cohere def _import_ctransformers() -> Any: from langchain_community.llms.ctransformers import CTransformers return CTransformers def _import_ctranslate2() -> Any: from langchain_community.llms.ctranslate2 import CTranslate2 return CTranslate2 def _import_databricks() -> Any: from langchain_community.llms.databricks import Databricks return Databricks def _import_databricks_chat() -> Any: from langchain_community.chat_models.databricks import ChatDatabricks return ChatDatabricks def _import_deepinfra() -> Any: from langchain_community.llms.deepinfra import DeepInfra return DeepInfra def _import_deepsparse() -> Any: from langchain_community.llms.deepsparse import DeepSparse return DeepSparse def _import_edenai() -> Any: from langchain_community.llms.edenai import EdenAI return EdenAI def _import_fake() -> Any: from langchain_community.llms.fake import FakeListLLM return FakeListLLM def _import_fireworks() -> Any: from langchain_community.llms.fireworks import 
Fireworks return Fireworks def _import_forefrontai() -> Any: from langchain_community.llms.forefrontai import ForefrontAI return ForefrontAI def _import_gigachat() -> Any: from langchain_community.llms.gigachat import GigaChat return GigaChat def _import_google_palm() -> Any: from langchain_community.llms.google_palm import GooglePalm return GooglePalm def _import_gooseai() -> Any: from langchain_community.llms.gooseai import GooseAI return GooseAI def _import_gpt4all() -> Any: from langchain_community.llms.gpt4all import GPT4All return GPT4All def _import_gradient_ai() -> Any: from langchain_community.llms.gradient_ai import GradientLLM return GradientLLM def _import_huggingface_endpoint() -> Any: from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint return HuggingFaceEndpoint def _import_huggingface_hub() -> Any: from langchain_community.llms.huggingface_hub import HuggingFaceHub return HuggingFaceHub def _import_huggingface_pipeline() -> Any: from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline return HuggingFacePipeline def _import_huggingface_text_gen_inference() -> Any: from langchain_community.llms.huggingface_text_gen_inference import ( HuggingFaceTextGenInference, ) return HuggingFaceTextGenInference def _import_human() -> Any: from langchain_community.llms.human import HumanInputLLM return HumanInputLLM def _import_javelin_ai_gateway() -> Any: from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway def _import_koboldai() -> Any: from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM def _import_llamacpp() -> Any: from langchain_community.llms.llamacpp import LlamaCpp return LlamaCpp def _import_manifest() -> Any: from langchain_community.llms.manifest import ManifestWrapper return ManifestWrapper def _import_minimax() -> Any: from langchain_community.llms.minimax import Minimax return Minimax def _import_mlflow() -> Any: from langchain_community.llms.mlflow import Mlflow return Mlflow def _import_mlflow_chat() -> Any: from langchain_community.chat_models.mlflow import ChatMlflow return ChatMlflow def _import_mlflow_ai_gateway() -> Any: from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway return MlflowAIGateway def _import_modal() -> Any: from langchain_community.llms.modal import Modal return Modal def _import_mosaicml() -> Any: from langchain_community.llms.mosaicml import MosaicML return MosaicML def _import_nlpcloud() -> Any: from langchain_community.llms.nlpcloud import NLPCloud return NLPCloud def _import_octoai_endpoint() -> Any: from langchain_community.llms.octoai_endpoint import OctoAIEndpoint return OctoAIEndpoint def _import_ollama() -> Any: from langchain_community.llms.ollama import Ollama return Ollama def _import_opaqueprompts() -> Any: from langchain_community.llms.opaqueprompts import OpaquePrompts return OpaquePrompts def _import_azure_openai() -> Any: from langchain_community.llms.openai import AzureOpenAI return AzureOpenAI def _import_openai() -> Any: from langchain_community.llms.openai import OpenAI return OpenAI def _import_openai_chat() -> Any: from langchain_community.llms.openai import OpenAIChat return OpenAIChat def _import_openllm() -> Any: from langchain_community.llms.openllm import OpenLLM return OpenLLM def _import_openlm() -> Any: from langchain_community.llms.openlm import OpenLM return OpenLM def _import_pai_eas_endpoint() -> Any: from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint return 
PaiEasEndpoint def _import_petals() -> Any: from langchain_community.llms.petals import Petals return Petals def _import_pipelineai() -> Any: from langchain_community.llms.pipelineai import PipelineAI return PipelineAI def _import_predibase() -> Any: from langchain_community.llms.predibase import Predibase return Predibase def _import_predictionguard() -> Any: from langchain_community.llms.predictionguard import PredictionGuard return PredictionGuard def _import_promptlayer() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI return PromptLayerOpenAI def _import_promptlayer_chat() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat return PromptLayerOpenAIChat def _import_replicate() -> Any: from langchain_community.llms.replicate import Replicate return Replicate def _import_rwkv() -> Any: from langchain_community.llms.rwkv import RWKV return RWKV def _import_sagemaker_endpoint() -> Any: from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint return SagemakerEndpoint def _import_self_hosted() -> Any: from langchain_community.llms.self_hosted import SelfHostedPipeline return SelfHostedPipeline def _import_self_hosted_hugging_face() -> Any: from langchain_community.llms.self_hosted_hugging_face import ( SelfHostedHuggingFaceLLM, ) return SelfHostedHuggingFaceLLM def _import_stochasticai() -> Any: from langchain_community.llms.stochasticai import StochasticAI return StochasticAI def _import_symblai_nebula() -> Any: from langchain_community.llms.symblai_nebula import Nebula return Nebula def _import_textgen() -> Any: from langchain_community.llms.textgen import TextGen return TextGen def _import_titan_takeoff() -> Any: from langchain_community.llms.titan_takeoff import TitanTakeoff return TitanTakeoff def _import_titan_takeoff_pro() -> Any: from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro return TitanTakeoffPro def _import_together() -> Any: from langchain_community.llms.together import Together return Together def _import_tongyi() -> Any: from langchain_community.llms.tongyi import Tongyi return Tongyi def _import_vertex() -> Any: from langchain_community.llms.vertexai import VertexAI return VertexAI def _import_vertex_model_garden() -> Any: from langchain_community.llms.vertexai import VertexAIModelGarden return VertexAIModelGarden def _import_vllm() -> Any: from langchain_community.llms.vllm import VLLM return VLLM def _import_vllm_openai() -> Any: from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI def _import_watsonxllm() -> Any: from langchain_community.llms.watsonxllm import WatsonxLLM return WatsonxLLM def _import_writer() -> Any: from langchain_community.llms.writer import Writer return Writer def _import_xinference() -> Any: from langchain_community.llms.xinference import Xinference return Xinference def _import_yandex_gpt() -> Any: from langchain_community.llms.yandex import YandexGPT return YandexGPT def _import_volcengine_maas() -> Any: from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM return VolcEngineMaasLLM def __getattr__(name: str) -> Any: from langchain_community import llms # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing LLMs from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.llms import {name}`.\n\n" "To install langchain-community run `pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) if name == "type_to_cls_dict": # for backwards compatibility type_to_cls_dict: Dict[str, Type[BaseLLM]] = { k: v() for k, v in get_type_to_cls_dict().items() } return type_to_cls_dict else: return getattr(llms, name) __all__ = [ "AI21", "AlephAlpha", "AmazonAPIGateway", "Anthropic", "Anyscale", "Arcee", "Aviary", "AzureMLOnlineEndpoint", "AzureOpenAI", "Banana", "Baseten", "Beam", "Bedrock", "CTransformers", "CTranslate2", "CerebriumAI", "ChatGLM", "Clarifai", "Cohere", "Databricks", "DeepInfra", "DeepSparse", "EdenAI", "FakeListLLM", "Fireworks", "ForefrontAI", "GigaChat", "GPT4All", "GooglePalm", "GooseAI", "GradientLLM", "HuggingFaceEndpoint", "HuggingFaceHub", "HuggingFacePipeline", "HuggingFaceTextGenInference", "HumanInputLLM", "KoboldApiLLM", "LlamaCpp", "TextGen", "ManifestWrapper", "Minimax", "MlflowAIGateway", "Modal", "MosaicML", "Nebula", "NIBittensorLLM", "NLPCloud", "Ollama", "OpenAI", "OpenAIChat", "OpenLLM", "OpenLM", "PaiEasEndpoint", "Petals", "PipelineAI", "Predibase", "PredictionGuard", "PromptLayerOpenAI", "PromptLayerOpenAIChat", "OpaquePrompts", "RWKV", "Replicate", "SagemakerEndpoint", "SelfHostedHuggingFaceLLM", "SelfHostedPipeline", "StochasticAI", "TitanTakeoff", "TitanTakeoffPro", "Tongyi", "VertexAI", "VertexAIModelGarden", "VLLM", "VLLMOpenAI", "WatsonxLLM", "Writer", "OctoAIEndpoint", "Xinference", "JavelinAIGateway", "QianfanLLMEndpoint", "YandexGPT", "VolcEngineMaasLLM", ] def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]: return { "ai21": _import_ai21, "aleph_alpha": _import_aleph_alpha, "amazon_api_gateway": _import_amazon_api_gateway, "amazon_bedrock": _import_bedrock, "anthropic": _import_anthropic, "anyscale": _import_anyscale, "arcee": _import_arcee, "aviary": _import_aviary, "azure": _import_azure_openai, "azureml_endpoint": _import_azureml_endpoint, "bananadev": _import_bananadev, "baseten": _import_baseten, "beam": _import_beam, "cerebriumai": _import_cerebriumai, "chat_glm": _import_chatglm, "clarifai": _import_clarifai, "cohere": _import_cohere, "ctransformers": _import_ctransformers, "ctranslate2": _import_ctranslate2, "databricks": _import_databricks, "databricks-chat": _import_databricks_chat, "deepinfra": _import_deepinfra, "deepsparse": _import_deepsparse, "edenai": _import_edenai, "fake-list": _import_fake, "forefrontai": _import_forefrontai, "giga-chat-model": _import_gigachat, "google_palm": _import_google_palm, "gooseai": _import_gooseai, "gradient": _import_gradient_ai, "gpt4all": _import_gpt4all, "huggingface_endpoint": _import_huggingface_endpoint, "huggingface_hub": _import_huggingface_hub, "huggingface_pipeline": _import_huggingface_pipeline, "huggingface_textgen_inference": _import_huggingface_text_gen_inference, "human-input": _import_human, "koboldai": _import_koboldai, "llamacpp": _import_llamacpp, "textgen": _import_textgen, "minimax": _import_minimax, "mlflow": _import_mlflow, "mlflow-chat": _import_mlflow_chat, "mlflow-ai-gateway": _import_mlflow_ai_gateway, "modal": _import_modal, "mosaic": _import_mosaicml, "nebula": _import_symblai_nebula, "nibittensor": _import_bittensor, "nlpcloud": _import_nlpcloud, "ollama": _import_ollama, "openai": _import_openai, "openlm": _import_openlm, "pai_eas_endpoint": _import_pai_eas_endpoint, "petals": _import_petals, "pipelineai": _import_pipelineai, 
"predibase": _import_predibase, "opaqueprompts": _import_opaqueprompts, "replicate": _import_replicate, "rwkv": _import_rwkv, "sagemaker_endpoint": _import_sagemaker_endpoint, "self_hosted": _import_self_hosted, "self_hosted_hugging_face": _import_self_hosted_hugging_face, "stochasticai": _import_stochasticai, "together": _import_together, "tongyi": _import_tongyi, "titan_takeoff": _import_titan_takeoff, "titan_takeoff_pro": _import_titan_takeoff_pro, "vertexai": _import_vertex, "vertexai_model_garden": _import_vertex_model_garden, "openllm": _import_openllm, "openllm_client": _import_openllm, "vllm": _import_vllm, "vllm_openai": _import_vllm_openai, "watsonxllm": _import_watsonxllm, "writer": _import_writer, "xinference": _import_xinference, "javelin-ai-gateway": _import_javelin_ai_gateway, "qianfan_endpoint": _import_baidu_qianfan_endpoint, "yandex_gpt": _import_yandex_gpt, "VolcEngineMaasLLM": _import_volcengine_maas, }
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')]
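Each value in the registry above is a zero-argument callable that performs the provider import only when asked for. Resolving an entry end-to-end looks roughly like this; the "fake-list" provider is used so the sketch needs no external credentials.

from langchain.llms import get_type_to_cls_dict

registry = get_type_to_cls_dict()

# registry["fake-list"] is _import_fake; calling it returns the FakeListLLM class.
fake_llm_cls = registry["fake-list"]()
llm = fake_llm_cls(responses=["Hello from the registry!"])
print(llm.invoke("any prompt"))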
import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from langchain_community.utilities.redis import get_client from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key logger = logging.getLogger(__name__) class BaseEntityStore(BaseModel, ABC): """Abstract base class for Entity store.""" @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass class InMemoryEntityStore(BaseEntityStore): """In-memory Entity store.""" store: Dict[str, Optional[str]] = {} def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default) def set(self, key: str, value: Optional[str]) -> None: self.store[key] = value def delete(self, key: str) -> None: del self.store[key] def exists(self, key: str) -> bool: return key in self.store def clear(self) -> None: return self.store.clear() class UpstashRedisEntityStore(BaseEntityStore): """Upstash Redis backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ def __init__( self, session_id: str = "default", url: str = "", token: str = "", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: from upstash_redis import Redis except ImportError: raise ImportError( "Could not import upstash_redis python package. " "Please install it with `pip install upstash_redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = Redis(url=url, token=token) except Exception: logger.error("Upstash Redis instance could not be initiated.") self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: def scan_and_delete(cursor: int) -> int: cursor, keys_to_delete = self.redis_client.scan( cursor, f"{self.full_key_prefix}:*" ) self.redis_client.delete(*keys_to_delete) return cursor cursor = scan_and_delete(0) while cursor != 0: scan_and_delete(cursor) class RedisEntityStore(BaseEntityStore): """Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ redis_client: Any session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 recall_ttl: Optional[int] = 60 * 60 * 24 * 3 def __init__( self, session_id: str = "default", url: str = "redis://localhost:6379/0", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = get_client(redis_url=url, decode_responses=True) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: # iterate a list in batches of size batch_size def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: iterator = iter(iterable) while batch := list(islice(iterator, batch_size)): yield batch for keybatch in batched( self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500 ): self.redis_client.delete(*keybatch) class SQLiteEntityStore(BaseEntityStore): """SQLite-backed Entity store""" session_id: str = "default" table_name: str = "memory_store" conn: Any = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def __init__( self, session_id: str = "default", db_file: str = "entities.db", table_name: str = "memory_store", *args: Any, **kwargs: Any, ): try: import sqlite3 except ImportError: raise ImportError( "Could not import sqlite3 python package. " "Please install it with `pip install sqlite3`." ) super().__init__(*args, **kwargs) self.conn = sqlite3.connect(db_file) self.session_id = session_id self.table_name = table_name self._create_table_if_not_exists() @property def full_table_name(self) -> str: return f"{self.table_name}_{self.session_id}" def _create_table_if_not_exists(self) -> None: create_table_query = f""" CREATE TABLE IF NOT EXISTS {self.full_table_name} ( key TEXT PRIMARY KEY, value TEXT ) """ with self.conn: self.conn.execute(create_table_query) def get(self, key: str, default: Optional[str] = None) -> Optional[str]: query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer memory. 
Extracts named entities from the recent chat history and generates summaries. With a swappable entity store, persisting entities across conversations. Defaults to an in-memory entity store, and can be swapped out for a Redis, SQLite, or other entity store. """ human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT # Cache of recently detected entity names, if any # It is updated when load_memory_variables is called: entity_cache: List[str] = [] # Number of recent message pairs to consider when updating entities: k: int = 3 chat_history_key: str = "history" # Store to manage entity-related data: entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) @property def buffer(self) -> List[BaseMessage]: """Access chat memory messages.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return ["entities", self.chat_history_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """ Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet. """ # Create an LLMChain for predicting entity names from the recent chat history: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) # Generates a comma-separated list of named entities, # e.g. "Jane, White House, UFO" # or "NONE" if no named entities are extracted: output = chain.predict( history=buffer_string, input=inputs[prompt_input_key], ) # If no named entities are extracted, assigns an empty list. if output.strip() == "NONE": entities = [] else: # Make a list of the extracted entities: entities = [w.strip() for w in output.split(",")] # Make a dictionary of entities with summary if exists: entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, "") # Replaces the entity name cache with the most recently discussed entities, # or if no entities were extracted, clears the cache: self.entity_cache = entities # Should we return as message objects or as a string? if self.return_messages: # Get last `k` pair of chat messages: buffer: Any = self.buffer[-self.k * 2 :] else: # Reuse the string we made earlier: buffer = buffer_string return { self.chat_history_key: buffer, "entities": entity_summaries, } def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """ Save context from this conversation history to the entity store. Generates a summary for each entity in the entity cache by prompting the model, and saves these summaries to the entity store. 
""" super().save_context(inputs, outputs) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) input_data = inputs[prompt_input_key] # Create an LLMChain for predicting entity summarization from the context chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) # Generate new summaries for entities and save them in the entity store for entity in self.entity_cache: # Get existing summary if it exists existing_summary = self.entity_store.get(entity, "") output = chain.predict( summary=existing_summary, entity=entity, history=buffer_string, input=input_data, ) # Save the updated summary to the entity store self.entity_store.set(entity, output.strip()) def clear(self) -> None: """Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
[ "langchain_core.messages.get_buffer_string", "langchain.chains.llm.LLMChain", "langchain.memory.utils.get_prompt_input_key", "langchain_core.pydantic_v1.Field", "langchain_community.utilities.redis.get_client" ]
[((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')]
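To make the entity-memory record above concrete, the following is a hedged usage sketch (not taken from the source): it wires `ConversationEntityMemory` to its default in-memory store and a `FakeListLLM` whose canned responses stand in for the entity-extraction and entity-summarization calls, so it runs offline. It assumes the public import paths `langchain.memory.ConversationEntityMemory` and `langchain_community.llms.FakeListLLM`.

# Hypothetical usage sketch; the fake LLM's first response plays the
# entity-extraction call ("Alice") and the second plays the summarization call.
from langchain.memory import ConversationEntityMemory
from langchain_community.llms import FakeListLLM

llm = FakeListLLM(
    responses=["Alice", "Alice is a software engineer who writes Python."]
)
memory = ConversationEntityMemory(llm=llm, k=3)

# Runs the extraction chain, fills entity_cache, and returns history + entities.
print(memory.load_memory_variables({"input": "Alice writes Python all day."}))

# Runs the summarization chain once per cached entity and stores the result.
memory.save_context(
    {"input": "Alice writes Python all day."},
    {"output": "Nice, tell me more about Alice."},
)
print(memory.entity_store.get("Alice"))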
"""**Tools** are classes that an Agent uses to interact with the world. Each tool has a **description**. Agent uses the description to choose the right tool for the job. **Class hierarchy:** .. code-block:: ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool <name> # Examples: BraveSearch, HumanInputRun **Main helpers:** .. code-block:: CallbackManagerForToolRun, AsyncCallbackManagerForToolRun """ import warnings from typing import Any from langchain_core._api import LangChainDeprecationWarning from langchain_core.tools import BaseTool, StructuredTool, Tool, tool from langchain.utils.interactive_env import is_interactive_env # Used for internal purposes _DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"} def _import_python_tool_PythonAstREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def _import_python_tool_PythonREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def __getattr__(name: str) -> Any: if name == "PythonAstREPLTool": return _import_python_tool_PythonAstREPLTool() elif name == "PythonREPLTool": return _import_python_tool_PythonREPLTool() else: from langchain_community import tools # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing tools from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.tools import {name}`.\n\n" "To install langchain-community run " "`pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) return getattr(tools, name) __all__ = [ "AINAppOps", "AINOwnerOps", "AINRuleOps", "AINTransfer", "AINValueOps", "AIPluginTool", "APIOperation", "ArxivQueryRun", "AzureCogsFormRecognizerTool", "AzureCogsImageAnalysisTool", "AzureCogsSpeech2TextTool", "AzureCogsText2SpeechTool", "AzureCogsTextAnalyticsHealthTool", "BaseGraphQLTool", "BaseRequestsTool", "BaseSQLDatabaseTool", "BaseSparkSQLTool", "BaseTool", "BearlyInterpreterTool", "BingSearchResults", "BingSearchRun", "BraveSearch", "ClickTool", "CopyFileTool", "CurrentWebPageTool", "DeleteFileTool", "DuckDuckGoSearchResults", "DuckDuckGoSearchRun", "E2BDataAnalysisTool", "EdenAiExplicitImageTool", "EdenAiObjectDetectionTool", "EdenAiParsingIDTool", "EdenAiParsingInvoiceTool", "EdenAiSpeechToTextTool", "EdenAiTextModerationTool", "EdenAiTextToSpeechTool", "EdenaiTool", "ElevenLabsText2SpeechTool", "ExtractHyperlinksTool", "ExtractTextTool", "FileSearchTool", "GetElementsTool", "GmailCreateDraft", "GmailGetMessage", "GmailGetThread", "GmailSearch", "GmailSendMessage", "GoogleCloudTextToSpeechTool", "GooglePlacesTool", "GoogleSearchResults", "GoogleSearchRun", "GoogleSerperResults", "GoogleSerperRun", "SearchAPIResults", "SearchAPIRun", "HumanInputRun", "IFTTTWebhook", "InfoPowerBITool", "InfoSQLDatabaseTool", "InfoSparkSQLTool", "JiraAction", "JsonGetValueTool", "JsonListKeysTool", "ListDirectoryTool", "ListPowerBITool", "ListSQLDatabaseTool", "ListSparkSQLTool", "MerriamWebsterQueryRun", "MetaphorSearchResults", "MoveFileTool", "NasaAction", "NavigateBackTool", "NavigateTool", "O365CreateDraftMessage", "O365SearchEmails", "O365SearchEvents", "O365SendEvent", "O365SendMessage", "OpenAPISpec", "OpenWeatherMapQueryRun", "PubmedQueryRun", "RedditSearchRun", "QueryCheckerTool", "QueryPowerBITool", "QuerySQLCheckerTool", "QuerySQLDataBaseTool", "QuerySparkSQLTool", "ReadFileTool", "RequestsDeleteTool", "RequestsGetTool", "RequestsPatchTool", "RequestsPostTool", "RequestsPutTool", "SteamWebAPIQueryRun", "SceneXplainTool", "SearxSearchResults", "SearxSearchRun", "ShellTool", "SlackGetChannel", "SlackGetMessage", "SlackScheduleMessage", "SlackSendMessage", "SleepTool", "StdInInquireTool", "StackExchangeTool", "SteamshipImageGenerationTool", "StructuredTool", "Tool", "VectorStoreQATool", "VectorStoreQAWithSourcesTool", "WikipediaQueryRun", "WolframAlphaQueryRun", "WriteFileTool", "YahooFinanceNewsTool", "YouTubeSearchTool", "ZapierNLAListActions", "ZapierNLARunAction", "format_tool_to_openai_function", "tool", ]
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')]
from functools import partial from typing import Optional from langchain_core.callbacks.manager import ( Callbacks, ) from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.retrievers import BaseRetriever from langchain.tools import Tool class RetrieverInput(BaseModel): """Input to the retriever.""" query: str = Field(description="query to look up in retriever") def _get_relevant_documents( query: str, retriever: BaseRetriever, document_prompt: BasePromptTemplate, document_separator: str, callbacks: Callbacks = None, ) -> str: docs = retriever.get_relevant_documents(query, callbacks=callbacks) return document_separator.join( format_document(doc, document_prompt) for doc in docs ) async def _aget_relevant_documents( query: str, retriever: BaseRetriever, document_prompt: BasePromptTemplate, document_separator: str, callbacks: Callbacks = None, ) -> str: docs = await retriever.aget_relevant_documents(query, callbacks=callbacks) return document_separator.join( format_document(doc, document_prompt) for doc in docs ) def create_retriever_tool( retriever: BaseRetriever, name: str, description: str, *, document_prompt: Optional[BasePromptTemplate] = None, document_separator: str = "\n\n", ) -> Tool: """Create a tool to do retrieval of documents. Args: retriever: The retriever to use for the retrieval name: The name for the tool. This will be passed to the language model, so should be unique and somewhat descriptive. description: The description for the tool. This will be passed to the language model, so should be descriptive. Returns: Tool class to pass to an agent """ document_prompt = document_prompt or PromptTemplate.from_template("{page_content}") func = partial( _get_relevant_documents, retriever=retriever, document_prompt=document_prompt, document_separator=document_separator, ) afunc = partial( _aget_relevant_documents, retriever=retriever, document_prompt=document_prompt, document_separator=document_separator, ) return Tool( name=name, description=description, func=func, coroutine=afunc, args_schema=RetrieverInput, )
[ "langchain.tools.Tool", "langchain_core.prompts.format_document", "langchain_core.prompts.PromptTemplate.from_template", "langchain_core.pydantic_v1.Field" ]
[((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_get_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2003, 2126), False, 'from functools import partial\n'), ((2173, 2304), 'functools.partial', 'partial', (['_aget_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_aget_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2180, 2304), False, 'from functools import partial\n'), ((2350, 2450), 'langchain.tools.Tool', 'Tool', ([], {'name': 'name', 'description': 'description', 'func': 'func', 'coroutine': 'afunc', 'args_schema': 'RetrieverInput'}), '(name=name, description=description, func=func, coroutine=afunc,\n args_schema=RetrieverInput)\n', (2354, 2450), False, 'from langchain.tools import Tool\n'), ((1938, 1984), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""{page_content}"""'], {}), "('{page_content}')\n", (1966, 1984), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((796, 833), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (811, 833), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((1176, 1213), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (1191, 1213), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n')]
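A small usage sketch for the retriever-tool factory above, not part of the dataset row: a toy in-memory retriever (`ToyRetriever`, invented here) is wrapped into a `Tool`, assuming the module path `langchain.tools.retriever` for `create_retriever_tool`.

# Hypothetical usage sketch with a toy retriever so no vector store is needed.
from typing import List

from langchain.tools.retriever import create_retriever_tool
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class ToyRetriever(BaseRetriever):
    """Illustrative retriever that returns a fixed list of documents."""

    docs: List[Document] = [
        Document(page_content="LangChain wraps retrievers into agent tools.")
    ]

    def _get_relevant_documents(self, query: str, *, run_manager) -> List[Document]:
        # A real retriever would rank documents against the query.
        return self.docs


tool = create_retriever_tool(
    ToyRetriever(),
    name="docs_search",
    description="Searches the toy document collection for relevant passages.",
)
print(tool.run("how are retriever tools created?"))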
from typing import Any, List, Sequence, Tuple, Union from langchain_core._api import deprecated from langchain_core.agents import AgentAction, AgentFinish from langchain_core.callbacks import Callbacks from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts.base import BasePromptTemplate from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate from langchain_core.runnables import Runnable, RunnablePassthrough from langchain_core.tools import BaseTool from langchain.agents.agent import BaseSingleActionAgent from langchain.agents.format_scratchpad import format_xml from langchain.agents.output_parsers import XMLAgentOutputParser from langchain.agents.xml.prompt import agent_instructions from langchain.chains.llm import LLMChain from langchain.tools.render import ToolsRenderer, render_text_description @deprecated("0.1.0", alternative="create_xml_agent", removal="0.2.0") class XMLAgent(BaseSingleActionAgent): """Agent that uses XML tags. Args: tools: list of tools the agent can choose from llm_chain: The LLMChain to call to predict the next action Examples: .. code-block:: python from langchain.agents import XMLAgent from langchain tools = ... model = """ tools: List[BaseTool] """List of tools this agent has access to.""" llm_chain: LLMChain """Chain to use to predict action.""" @property def input_keys(self) -> List[str]: return ["input"] @staticmethod def get_default_prompt() -> ChatPromptTemplate: base_prompt = ChatPromptTemplate.from_template(agent_instructions) return base_prompt + AIMessagePromptTemplate.from_template( "{intermediate_steps}" ) @staticmethod def get_default_output_parser() -> XMLAgentOutputParser: return XMLAgentOutputParser() def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: log = "" for action, observation in intermediate_steps: log += ( f"<tool>{action.tool}</tool><tool_input>{action.tool_input}" f"</tool_input><observation>{observation}</observation>" ) tools = "" for tool in self.tools: tools += f"{tool.name}: {tool.description}\n" inputs = { "intermediate_steps": log, "tools": tools, "question": kwargs["input"], "stop": ["</tool_input>", "</final_answer>"], } response = self.llm_chain(inputs, callbacks=callbacks) return response[self.llm_chain.output_key] async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: log = "" for action, observation in intermediate_steps: log += ( f"<tool>{action.tool}</tool><tool_input>{action.tool_input}" f"</tool_input><observation>{observation}</observation>" ) tools = "" for tool in self.tools: tools += f"{tool.name}: {tool.description}\n" inputs = { "intermediate_steps": log, "tools": tools, "question": kwargs["input"], "stop": ["</tool_input>", "</final_answer>"], } response = await self.llm_chain.acall(inputs, callbacks=callbacks) return response[self.llm_chain.output_key] def create_xml_agent( llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: BasePromptTemplate, tools_renderer: ToolsRenderer = render_text_description, ) -> Runnable: """Create an agent that uses XML to format its logic. Args: llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use, must have input keys `tools`: contains descriptions for each tool. `agent_scratchpad`: contains previous agent actions and tool outputs. 
tools_renderer: This controls how the tools are converted into a string and then passed into the LLM. Default is `render_text_description`. Returns: A Runnable sequence representing an agent. It takes as input all the same input variables as the prompt passed in does. It returns as output either an AgentAction or AgentFinish. Example: .. code-block:: python from langchain import hub from langchain_community.chat_models import ChatAnthropic from langchain.agents import AgentExecutor, create_xml_agent prompt = hub.pull("hwchase17/xml-agent-convo") model = ChatAnthropic() tools = ... agent = create_xml_agent(model, tools, prompt) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"input": "hi"}) # Use with chat history from langchain_core.messages import AIMessage, HumanMessage agent_executor.invoke( { "input": "what's my name?", # Notice that chat_history is a string # since this prompt is aimed at LLMs, not chat models "chat_history": "Human: My name is Bob\\nAI: Hello Bob!", } ) Prompt: The prompt must have input keys: * `tools`: contains descriptions for each tool. * `agent_scratchpad`: contains previous agent actions and tool outputs as an XML string. Here's an example: .. code-block:: python from langchain_core.prompts import PromptTemplate template = '''You are a helpful assistant. Help the user answer any questions. You have access to the following tools: {tools} In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation> For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond: <tool>search</tool><tool_input>weather in SF</tool_input> <observation>64 degrees</observation> When you are done, respond with a final answer between <final_answer></final_answer>. For example: <final_answer>The weather in SF is 64 degrees</final_answer> Begin! Previous Conversation: {chat_history} Question: {input} {agent_scratchpad}''' prompt = PromptTemplate.from_template(template) """ # noqa: E501 missing_vars = {"tools", "agent_scratchpad"}.difference(prompt.input_variables) if missing_vars: raise ValueError(f"Prompt missing required variables: {missing_vars}") prompt = prompt.partial( tools=tools_renderer(list(tools)), ) llm_with_stop = llm.bind(stop=["</tool_input>"]) agent = ( RunnablePassthrough.assign( agent_scratchpad=lambda x: format_xml(x["intermediate_steps"]), ) | prompt | llm_with_stop | XMLAgentOutputParser() ) return agent
[ "langchain_core.prompts.chat.AIMessagePromptTemplate.from_template", "langchain.agents.format_scratchpad.format_xml", "langchain.agents.output_parsers.XMLAgentOutputParser", "langchain_core._api.deprecated", "langchain_core.prompts.chat.ChatPromptTemplate.from_template" ]
[((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts.chat.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['agent_instructions'], {}), '(agent_instructions)\n', (1676, 1696), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((1905, 1927), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (1925, 1927), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((7448, 7470), 'langchain.agents.output_parsers.XMLAgentOutputParser', 'XMLAgentOutputParser', ([], {}), '()\n', (7468, 7470), False, 'from langchain.agents.output_parsers import XMLAgentOutputParser\n'), ((1726, 1787), 'langchain_core.prompts.chat.AIMessagePromptTemplate.from_template', 'AIMessagePromptTemplate.from_template', (['"""{intermediate_steps}"""'], {}), "('{intermediate_steps}')\n", (1763, 1787), False, 'from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate\n'), ((7350, 7385), 'langchain.agents.format_scratchpad.format_xml', 'format_xml', (["x['intermediate_steps']"], {}), "(x['intermediate_steps'])\n", (7360, 7385), False, 'from langchain.agents.format_scratchpad import format_xml\n')]
"""**Graphs** provide a natural language interface to graph databases.""" import warnings from typing import Any from langchain_core._api import LangChainDeprecationWarning from langchain.utils.interactive_env import is_interactive_env def __getattr__(name: str) -> Any: from langchain_community import graphs # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing graphs from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. " "Please import from langchain-community instead:\n\n" f"`from langchain_community.graphs import {name}`.\n\n" "To install langchain-community run `pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) return getattr(graphs, name) __all__ = [ "MemgraphGraph", "NetworkxEntityGraph", "Neo4jGraph", "NebulaGraph", "NeptuneGraph", "KuzuGraph", "HugeGraph", "RdfGraph", "ArangoGraph", "FalkorDBGraph", ]
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing graphs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.graphs import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (421, 773), False, 'import warnings\n')]
"""Chain that makes API calls and summarizes the responses to answer a question.""" from __future__ import annotations from typing import Any, Dict, List, Optional, Sequence, Tuple from urllib.parse import urlparse from langchain_community.utilities.requests import TextRequestsWrapper from langchain_core.callbacks import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, ) from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Field, root_validator from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT from langchain.chains.base import Chain from langchain.chains.llm import LLMChain def _extract_scheme_and_domain(url: str) -> Tuple[str, str]: """Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool: """Check if a URL is in the allowed domains. Args: url (str): The input URL. limit_to_domains (Sequence[str]): The allowed domains. Returns: bool: True if the URL is in the allowed domains, False otherwise. """ scheme, domain = _extract_scheme_and_domain(url) for allowed_domain in limit_to_domains: allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain) if scheme == allowed_scheme and domain == allowed_domain: return True return False class APIChain(Chain): """Chain that makes API calls and summarizes the responses to answer a question. *Security Note*: This API chain uses the requests toolkit to make GET, POST, PATCH, PUT, and DELETE requests to an API. Exercise care in who is allowed to use this chain. If exposing to end users, consider that users will be able to make arbitrary requests on behalf of the server hosting the code. For example, users could ask the server to make a request to a private API that is only accessible from the server. Control access to who can submit issue requests using this toolkit and what network access it has. See https://python.langchain.com/docs/security for more information. """ api_request_chain: LLMChain api_answer_chain: LLMChain requests_wrapper: TextRequestsWrapper = Field(exclude=True) api_docs: str question_key: str = "question" #: :meta private: output_key: str = "output" #: :meta private: limit_to_domains: Optional[Sequence[str]] """Use to limit the domains that can be accessed by the API chain. * For example, to limit to just the domain `https://www.example.com`, set `limit_to_domains=["https://www.example.com"]`. * The default value is an empty tuple, which means that no domains are allowed by default. By design this will raise an error on instantiation. * Use a None if you want to allow all domains by default -- this is not recommended for security reasons, as it would allow malicious users to make requests to arbitrary URLS including internal APIs accessible from the server. """ @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.question_key] @property def output_keys(self) -> List[str]: """Expect output key. 
:meta private: """ return [self.output_key] @root_validator(pre=True) def validate_api_request_prompt(cls, values: Dict) -> Dict: """Check that api request prompt expects the right variables.""" input_vars = values["api_request_chain"].prompt.input_variables expected_vars = {"question", "api_docs"} if set(input_vars) != expected_vars: raise ValueError( f"Input variables should be {expected_vars}, got {input_vars}" ) return values @root_validator(pre=True) def validate_limit_to_domains(cls, values: Dict) -> Dict: """Check that allowed domains are valid.""" if "limit_to_domains" not in values: raise ValueError( "You must specify a list of domains to limit access using " "`limit_to_domains`" ) if not values["limit_to_domains"] and values["limit_to_domains"] is not None: raise ValueError( "Please provide a list of domains to limit access using " "`limit_to_domains`." ) return values @root_validator(pre=True) def validate_api_answer_prompt(cls, values: Dict) -> Dict: """Check that api answer prompt expects the right variables.""" input_vars = values["api_answer_chain"].prompt.input_variables expected_vars = {"question", "api_docs", "api_url", "api_response"} if set(input_vars) != expected_vars: raise ValueError( f"Input variables should be {expected_vars}, got {input_vars}" ) return values def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() question = inputs[self.question_key] api_url = self.api_request_chain.predict( question=question, api_docs=self.api_docs, callbacks=_run_manager.get_child(), ) _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose) api_url = api_url.strip() if self.limit_to_domains and not _check_in_allowed_domain( api_url, self.limit_to_domains ): raise ValueError( f"{api_url} is not in the allowed domains: {self.limit_to_domains}" ) api_response = self.requests_wrapper.get(api_url) _run_manager.on_text( str(api_response), color="yellow", end="\n", verbose=self.verbose ) answer = self.api_answer_chain.predict( question=question, api_docs=self.api_docs, api_url=api_url, api_response=api_response, callbacks=_run_manager.get_child(), ) return {self.output_key: answer} async def _acall( self, inputs: Dict[str, Any], run_manager: Optional[AsyncCallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() question = inputs[self.question_key] api_url = await self.api_request_chain.apredict( question=question, api_docs=self.api_docs, callbacks=_run_manager.get_child(), ) await _run_manager.on_text( api_url, color="green", end="\n", verbose=self.verbose ) api_url = api_url.strip() if self.limit_to_domains and not _check_in_allowed_domain( api_url, self.limit_to_domains ): raise ValueError( f"{api_url} is not in the allowed domains: {self.limit_to_domains}" ) api_response = await self.requests_wrapper.aget(api_url) await _run_manager.on_text( str(api_response), color="yellow", end="\n", verbose=self.verbose ) answer = await self.api_answer_chain.apredict( question=question, api_docs=self.api_docs, api_url=api_url, api_response=api_response, callbacks=_run_manager.get_child(), ) return {self.output_key: answer} @classmethod def from_llm_and_api_docs( cls, llm: BaseLanguageModel, api_docs: str, headers: Optional[dict] = None, api_url_prompt: BasePromptTemplate = API_URL_PROMPT, api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT, 
limit_to_domains: Optional[Sequence[str]] = tuple(), **kwargs: Any, ) -> APIChain: """Load chain from just an LLM and the api docs.""" get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt) requests_wrapper = TextRequestsWrapper(headers=headers) get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) return cls( api_request_chain=get_request_chain, api_answer_chain=get_answer_chain, requests_wrapper=requests_wrapper, api_docs=api_docs, limit_to_domains=limit_to_domains, **kwargs, ) @property def _chain_type(self) -> str: return "api_chain"
[ "langchain.chains.llm.LLMChain", "langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager", "langchain_community.utilities.requests.TextRequestsWrapper", "langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager", "langchain_core.pydantic_v1.root_validator", "langchain_core.pydantic_v1.Field" ]
[((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((3687, 3711), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (3701, 3711), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4166, 4190), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4180, 4190), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((4777, 4801), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (4791, 4801), False, 'from langchain_core.pydantic_v1 import Field, root_validator\n'), ((8392, 8432), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_url_prompt'}), '(llm=llm, prompt=api_url_prompt)\n', (8400, 8432), False, 'from langchain.chains.llm import LLMChain\n'), ((8460, 8496), 'langchain_community.utilities.requests.TextRequestsWrapper', 'TextRequestsWrapper', ([], {'headers': 'headers'}), '(headers=headers)\n', (8479, 8496), False, 'from langchain_community.utilities.requests import TextRequestsWrapper\n'), ((8524, 8569), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'api_response_prompt'}), '(llm=llm, prompt=api_response_prompt)\n', (8532, 8569), False, 'from langchain.chains.llm import LLMChain\n'), ((5465, 5510), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (5508, 5510), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n'), ((6760, 6810), 'langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager', 'AsyncCallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (6808, 6810), False, 'from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun\n')]
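As an illustration of the `APIChain` record above (not from the source), the sketch below builds the chain with a scripted `FakeListLLM` and a `limit_to_domains` allow-list; the invocation is left commented out because running it would perform a real HTTP GET. The API docs string and URLs are invented for the example.

# Hypothetical construction sketch; the fake LLM scripts the URL prediction and
# the final answer. No request is made because invoke() stays commented out.
from langchain.chains import APIChain
from langchain_community.llms import FakeListLLM

api_docs = """BASE URL: https://api.open-meteo.com/v1/forecast
The endpoint accepts latitude, longitude and current_weather query parameters."""

llm = FakeListLLM(
    responses=[
        "https://api.open-meteo.com/v1/forecast"
        "?latitude=52.52&longitude=13.41&current_weather=true",
        "It is currently mild in Berlin.",
    ]
)

chain = APIChain.from_llm_and_api_docs(
    llm,
    api_docs,
    limit_to_domains=["https://api.open-meteo.com"],  # required allow-list
)

# chain.invoke({"question": "What is the weather in Berlin right now?"})
# would predict the URL, check it against limit_to_domains, perform the GET,
# and summarize the response with the second scripted answer.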
"""Hypothetical Document Embeddings. https://arxiv.org/abs/2212.10496 """ from __future__ import annotations from typing import Any, Dict, List, Optional import numpy as np from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.embeddings import Embeddings from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import Extra from langchain.chains.base import Chain from langchain.chains.hyde.prompts import PROMPT_MAP from langchain.chains.llm import LLMChain class HypotheticalDocumentEmbedder(Chain, Embeddings): """Generate hypothetical document for query, and then embed that. Based on https://arxiv.org/abs/2212.10496 """ base_embeddings: Embeddings llm_chain: LLMChain class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Input keys for Hyde's LLM chain.""" return self.llm_chain.input_keys @property def output_keys(self) -> List[str]: """Output keys for Hyde's LLM chain.""" return self.llm_chain.output_keys def embed_documents(self, texts: List[str]) -> List[List[float]]: """Call the base embeddings.""" return self.base_embeddings.embed_documents(texts) def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]: """Combine embeddings into final embeddings.""" return list(np.array(embeddings).mean(axis=0)) def embed_query(self, text: str) -> List[float]: """Generate a hypothetical document and embedded it.""" var_name = self.llm_chain.input_keys[0] result = self.llm_chain.generate([{var_name: text}]) documents = [generation.text for generation in result.generations[0]] embeddings = self.embed_documents(documents) return self.combine_embeddings(embeddings) def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: """Call the internal llm chain.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() return self.llm_chain(inputs, callbacks=_run_manager.get_child()) @classmethod def from_llm( cls, llm: BaseLanguageModel, base_embeddings: Embeddings, prompt_key: Optional[str] = None, custom_prompt: Optional[BasePromptTemplate] = None, **kwargs: Any, ) -> HypotheticalDocumentEmbedder: """Load and use LLMChain with either a specific prompt key or custom prompt.""" if custom_prompt is not None: prompt = custom_prompt elif prompt_key is not None and prompt_key in PROMPT_MAP: prompt = PROMPT_MAP[prompt_key] else: raise ValueError( f"Must specify prompt_key if custom_prompt not provided. Should be one " f"of {list(PROMPT_MAP.keys())}." ) llm_chain = LLMChain(llm=llm, prompt=prompt) return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs) @property def _chain_type(self) -> str: return "hyde_chain"
[ "langchain.chains.llm.LLMChain", "langchain.chains.hyde.prompts.PROMPT_MAP.keys", "langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager" ]
[((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (2301, 2303), False, 'from langchain_core.callbacks import CallbackManagerForChainRun\n'), ((1580, 1600), 'numpy.array', 'np.array', (['embeddings'], {}), '(embeddings)\n', (1588, 1600), True, 'import numpy as np\n'), ((3091, 3108), 'langchain.chains.hyde.prompts.PROMPT_MAP.keys', 'PROMPT_MAP.keys', ([], {}), '()\n', (3106, 3108), False, 'from langchain.chains.hyde.prompts import PROMPT_MAP\n')]
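A hedged usage sketch for the HyDE embedder above (not part of the dataset row): a `FakeListLLM` supplies the hypothetical document and `FakeEmbeddings` embeds it, so the snippet runs offline; `"web_search"` is assumed to be one of the keys in `PROMPT_MAP`.

# Hypothetical offline usage sketch: the fake LLM "hallucinates" the document
# that HyDE then embeds with the base embeddings.
from langchain.chains import HypotheticalDocumentEmbedder
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.llms import FakeListLLM

llm = FakeListLLM(responses=["Paris is the capital and largest city of France."])
base_embeddings = FakeEmbeddings(size=8)

hyde = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, prompt_key="web_search")

# embed_query() first generates the hypothetical answer, then embeds it, so the
# result has the dimensionality of the base embeddings.
vector = hyde.embed_query("What is the capital of France?")
print(len(vector))  # 8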
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf.""" from __future__ import annotations from typing import Any, Callable, List, NamedTuple, Optional, Sequence from langchain_core._api import deprecated from langchain_core.callbacks import BaseCallbackManager from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import PromptTemplate from langchain_core.pydantic_v1 import Field from langchain_core.tools import BaseTool from langchain.agents.agent import Agent, AgentExecutor, AgentOutputParser from langchain.agents.agent_types import AgentType from langchain.agents.mrkl.output_parser import MRKLOutputParser from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.tools import Tool from langchain.agents.utils import validate_tools_single_input from langchain.chains import LLMChain from langchain.tools.render import render_text_description class ChainConfig(NamedTuple): """Configuration for chain to use in MRKL system. Args: action_name: Name of the action. action: Action function to call. action_description: Description of the action. """ action_name: str action: Callable action_description: str @deprecated("0.1.0", alternative="create_react_agent", removal="0.2.0") class ZeroShotAgent(Agent): """Agent for the MRKL chain.""" output_parser: AgentOutputParser = Field(default_factory=MRKLOutputParser) @classmethod def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return MRKLOutputParser() @property def _agent_type(self) -> str: """Return Identifier of agent type.""" return AgentType.ZERO_SHOT_REACT_DESCRIPTION @property def observation_prefix(self) -> str: """Prefix to append the observation with.""" return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with.""" return "Thought:" @classmethod def create_prompt( cls, tools: Sequence[BaseTool], prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, ) -> PromptTemplate: """Create prompt in the style of the zero shot agent. Args: tools: List of tools the agent will have access to, used to format the prompt. prefix: String to put before the list of tools. suffix: String to put after the list of tools. input_variables: List of input variables the final prompt will expect. Returns: A PromptTemplate with the template assembled from the pieces here. 
""" tool_strings = render_text_description(list(tools)) tool_names = ", ".join([tool.name for tool in tools]) format_instructions = format_instructions.format(tool_names=tool_names) template = "\n\n".join([prefix, tool_strings, format_instructions, suffix]) if input_variables: return PromptTemplate(template=template, input_variables=input_variables) return PromptTemplate.from_template(template) @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: Optional[BaseCallbackManager] = None, output_parser: Optional[AgentOutputParser] = None, prefix: str = PREFIX, suffix: str = SUFFIX, format_instructions: str = FORMAT_INSTRUCTIONS, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt( tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser() return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, ) @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: validate_tools_single_input(cls.__name__, tools) if len(tools) == 0: raise ValueError( f"Got no tools for {cls.__name__}. At least one tool must be provided." ) for tool in tools: if tool.description is None: raise ValueError( f"Got a tool {tool.name} without a description. For this agent, " f"a description must always be provided." ) super()._validate_tools(tools) @deprecated("0.1.0", removal="0.2.0") class MRKLChain(AgentExecutor): """[Deprecated] Chain that implements the MRKL system.""" @classmethod def from_chains( cls, llm: BaseLanguageModel, chains: List[ChainConfig], **kwargs: Any ) -> AgentExecutor: """User friendly way to initialize the MRKL chain. This is intended to be an easy way to get up and running with the MRKL chain. Args: llm: The LLM to use as the agent LLM. chains: The chains the MRKL system has access to. **kwargs: parameters to be passed to initialization. Returns: An initialized MRKL chain. """ tools = [ Tool( name=c.action_name, func=c.action, description=c.action_description, ) for c in chains ] agent = ZeroShotAgent.from_llm_and_tools(llm, tools) return cls(agent=agent, tools=tools, **kwargs)
[ "langchain.agents.mrkl.output_parser.MRKLOutputParser", "langchain.chains.LLMChain", "langchain.agents.utils.validate_tools_single_input", "langchain_core.prompts.PromptTemplate", "langchain.agents.tools.Tool", "langchain_core._api.deprecated", "langchain_core.prompts.PromptTemplate.from_template", "langchain_core.pydantic_v1.Field" ]
[((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (5078, 5104), False, 'from langchain_core._api import deprecated\n'), ((1453, 1492), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'MRKLOutputParser'}), '(default_factory=MRKLOutputParser)\n', (1458, 1492), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1603, 1621), 'langchain.agents.mrkl.output_parser.MRKLOutputParser', 'MRKLOutputParser', ([], {}), '()\n', (1619, 1621), False, 'from langchain.agents.mrkl.output_parser import MRKLOutputParser\n'), ((3228, 3266), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3256, 3266), False, 'from langchain_core.prompts import PromptTemplate\n'), ((4052, 4119), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4060, 4119), False, 'from langchain.chains import LLMChain\n'), ((4549, 4597), 'langchain.agents.utils.validate_tools_single_input', 'validate_tools_single_input', (['cls.__name__', 'tools'], {}), '(cls.__name__, tools)\n', (4576, 4597), False, 'from langchain.agents.utils import validate_tools_single_input\n'), ((3146, 3212), 'langchain_core.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': 'input_variables'}), '(template=template, input_variables=input_variables)\n', (3160, 3212), False, 'from langchain_core.prompts import PromptTemplate\n'), ((5785, 5858), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'c.action_name', 'func': 'c.action', 'description': 'c.action_description'}), '(name=c.action_name, func=c.action, description=c.action_description)\n', (5789, 5858), False, 'from langchain.agents.tools import Tool\n')]
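To show the MRKL zero-shot agent above in motion without a real model, here is an illustrative sketch (not from the source) that scripts the ReAct-style transcript with a `FakeListLLM`; the `Clock` tool and its answers are invented.

# Hypothetical end-to-end sketch: a scripted fake LLM drives one tool call and
# one final answer, so the MRKL loop runs without a model or an API key.
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain_community.llms import FakeListLLM

tools = [
    Tool(
        name="Clock",
        func=lambda _: "It is 10:00.",
        description="Useful for checking the current time.",
    )
]

llm = FakeListLLM(
    responses=[
        "I should check the clock.\nAction: Clock\nAction Input: now",
        "I now know the final answer.\nFinal Answer: It is 10:00.",
    ]
)

agent = ZeroShotAgent.from_llm_and_tools(llm=llm, tools=tools)
executor = AgentExecutor(agent=agent, tools=tools)
print(executor.invoke({"input": "What time is it?"})["output"])  # It is 10:00.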
import base64 import io import os import uuid from io import BytesIO from pathlib import Path from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import LocalFileStore from langchain_community.chat_models import ChatOllama from langchain_community.embeddings import OllamaEmbeddings from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_core.messages import HumanMessage from PIL import Image def image_summarize(img_base64, prompt): """ Make image summary :param img_base64: Base64 encoded string for image :param prompt: Text prompt for summarizatiomn :return: Image summarization prompt """ chat = ChatOllama(model="bakllava", temperature=0) msg = chat.invoke( [ HumanMessage( content=[ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": f"data:image/jpeg;base64,{img_base64}", }, ] ) ] ) return msg.content def generate_img_summaries(img_base64_list): """ Generate summaries for images :param img_base64_list: Base64 encoded images :return: List of image summaries and processed images """ # Store image summaries image_summaries = [] processed_images = [] # Prompt prompt = """Give a detailed summary of the image.""" # Apply summarization to images for i, base64_image in enumerate(img_base64_list): try: image_summaries.append(image_summarize(base64_image, prompt)) processed_images.append(base64_image) except Exception as e: print(f"Error with image {i+1}: {e}") # noqa: T201 return image_summaries, processed_images def get_images(img_path): """ Extract images. :param img_path: A string representing the path to the images. """ # Get image URIs pil_images = [ Image.open(os.path.join(img_path, image_name)) for image_name in os.listdir(img_path) if image_name.endswith(".jpg") ] return pil_images def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string :param base64_string: Base64 string :param size: Image size :return: Re-sized Base64 string """ # Decode the Base64 string img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) # Resize the image resized_img = img.resize(size, Image.LANCZOS) # Save the resized image to a bytes buffer buffered = io.BytesIO() resized_img.save(buffered, format=img.format) # Encode the resized image to Base64 return base64.b64encode(buffered.getvalue()).decode("utf-8") def convert_to_base64(pil_image): """ Convert PIL images to Base64 encoded strings :param pil_image: PIL image :return: Re-sized Base64 string """ buffered = BytesIO() pil_image.save(buffered, format="JPEG") # You can change the format if needed img_str = base64.b64encode(buffered.getvalue()).decode("utf-8") # img_str = resize_base64_image(img_str, size=(831,623)) return img_str def create_multi_vector_retriever(vectorstore, image_summaries, images): """ Create retriever that indexes summaries, but returns raw images or texts :param vectorstore: Vectorstore to store embedded image sumamries :param image_summaries: Image summaries :param images: Base64 encoded images :return: Retriever """ # Initialize the storage layer for images store = LocalFileStore( str(Path(__file__).parent / "multi_vector_retriever_metadata") ) id_key = "doc_id" # Create the multi-vector retriever retriever = MultiVectorRetriever( vectorstore=vectorstore, byte_store=store, id_key=id_key, ) # Helper function to add documents to the vectorstore and docstore def add_documents(retriever, doc_summaries, doc_contents): doc_ids = [str(uuid.uuid4()) for _ in 
doc_contents] summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(doc_summaries) ] retriever.vectorstore.add_documents(summary_docs) retriever.docstore.mset(list(zip(doc_ids, doc_contents))) add_documents(retriever, image_summaries, images) return retriever # Load images doc_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) print("Read images") # noqa: T201 pil_images = get_images(rel_doc_path) # Convert to b64 images_base_64 = [convert_to_base64(i) for i in pil_images] # Image summaries print("Generate image summaries") # noqa: T201 image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) # The vectorstore to use to index the images summaries vectorstore_mvr = Chroma( collection_name="image_summaries", persist_directory=str(Path(__file__).parent / "chroma_db_multi_modal"), embedding_function=OllamaEmbeddings(model="llama2:7b"), ) # Create documents images_base_64_processed_documents = [ Document(page_content=i) for i in images_base_64_processed ] # Create retriever retriever_multi_vector_img = create_multi_vector_retriever( vectorstore_mvr, image_summaries, images_base_64_processed_documents, )
[ "langchain_community.embeddings.OllamaEmbeddings", "langchain_community.chat_models.ChatOllama", "langchain_core.messages.HumanMessage", "langchain.retrievers.multi_vector.MultiVectorRetriever", "langchain_core.documents.Document" ]
[((731, 774), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'model': '"""bakllava"""', 'temperature': '(0)'}), "(model='bakllava', temperature=0)\n", (741, 774), False, 'from langchain_community.chat_models import ChatOllama\n'), ((2494, 2525), 'base64.b64decode', 'base64.b64decode', (['base64_string'], {}), '(base64_string)\n', (2510, 2525), False, 'import base64\n'), ((2706, 2718), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2716, 2718), False, 'import io\n'), ((3062, 3071), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3069, 3071), False, 'from io import BytesIO\n'), ((3881, 3959), 'langchain.retrievers.multi_vector.MultiVectorRetriever', 'MultiVectorRetriever', ([], {'vectorstore': 'vectorstore', 'byte_store': 'store', 'id_key': 'id_key'}), '(vectorstore=vectorstore, byte_store=store, id_key=id_key)\n', (3901, 3959), False, 'from langchain.retrievers.multi_vector import MultiVectorRetriever\n'), ((4634, 4644), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4642, 4644), False, 'from pathlib import Path\n'), ((5269, 5293), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'i'}), '(page_content=i)\n', (5277, 5293), False, 'from langchain_core.documents import Document\n'), ((2547, 2567), 'io.BytesIO', 'io.BytesIO', (['img_data'], {}), '(img_data)\n', (2557, 2567), False, 'import io\n'), ((4566, 4580), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4570, 4580), False, 'from pathlib import Path\n'), ((5167, 5202), 'langchain_community.embeddings.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'model': '"""llama2:7b"""'}), "(model='llama2:7b')\n", (5183, 5202), False, 'from langchain_community.embeddings import OllamaEmbeddings\n'), ((821, 957), 'langchain_core.messages.HumanMessage', 'HumanMessage', ([], {'content': "[{'type': 'text', 'text': prompt}, {'type': 'image_url', 'image_url':\n f'data:image/jpeg;base64,{img_base64}'}]"}), "(content=[{'type': 'text', 'text': prompt}, {'type':\n 'image_url', 'image_url': f'data:image/jpeg;base64,{img_base64}'}])\n", (833, 957), False, 'from langchain_core.messages import HumanMessage\n'), ((2071, 2105), 'os.path.join', 'os.path.join', (['img_path', 'image_name'], {}), '(img_path, image_name)\n', (2083, 2105), False, 'import os\n'), ((2133, 2153), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (2143, 2153), False, 'import os\n'), ((4223, 4278), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 's', 'metadata': '{id_key: doc_ids[i]}'}), '(page_content=s, metadata={id_key: doc_ids[i]})\n', (4231, 4278), False, 'from langchain_core.documents import Document\n'), ((4149, 4161), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4159, 4161), False, 'import uuid\n'), ((3737, 3751), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3741, 3751), False, 'from pathlib import Path\n'), ((5094, 5108), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5098, 5108), False, 'from pathlib import Path\n')]
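A minimal follow-up sketch for the multi-vector retriever built in the sample above: once retriever_multi_vector_img exists, it can be queried like any LangChain retriever. The query string and the printed preview length are illustrative assumptions, not part of the original sample.

# Hypothetical query against the retriever assembled above.
# Image summaries are embedded in Chroma; the docstore hands back the raw image Documents.
query = "a chart showing quarterly revenue"  # illustrative query
retrieved_docs = retriever_multi_vector_img.get_relevant_documents(query)
for doc in retrieved_docs:
    # Each hit should be a Document whose page_content is a base64-encoded image.
    print(doc.page_content[:80])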
from fastapi import Body from sse_starlette.sse import EventSourceResponse from configs import LLM_MODELS, TEMPERATURE from server.utils import wrap_done, get_OpenAI from langchain.chains import LLMChain from langchain.callbacks import AsyncIteratorCallbackHandler from typing import AsyncIterable, Optional import asyncio from langchain.prompts import PromptTemplate from server.utils import get_prompt_template async def completion(query: str = Body(..., description="用户输入", examples=["恼羞成怒"]), stream: bool = Body(False, description="流式输出"), echo: bool = Body(False, description="除了输出之外,还回显输入"), model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"), temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0), max_tokens: Optional[int] = Body(1024, description="限制LLM生成Token数量,默认None代表模型最大值"), # top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0), prompt_name: str = Body("default", description="使用的prompt模板名称(在configs/prompt_config.py中配置)"), ): #todo 因ApiModelWorker 默认是按chat处理的,会对params["prompt"] 解析为messages,因此ApiModelWorker 使用时需要有相应处理 async def completion_iterator(query: str, model_name: str = LLM_MODELS[0], prompt_name: str = prompt_name, echo: bool = echo, ) -> AsyncIterable[str]: nonlocal max_tokens callback = AsyncIteratorCallbackHandler() if isinstance(max_tokens, int) and max_tokens <= 0: max_tokens = None model = get_OpenAI( model_name=model_name, temperature=temperature, max_tokens=max_tokens, callbacks=[callback], echo=echo ) prompt_template = get_prompt_template("completion", prompt_name) prompt = PromptTemplate.from_template(prompt_template) chain = LLMChain(prompt=prompt, llm=model) # Begin a task that runs in the background. task = asyncio.create_task(wrap_done( chain.acall({"input": query}), callback.done), ) if stream: async for token in callback.aiter(): # Use server-sent-events to stream the response yield token else: answer = "" async for token in callback.aiter(): answer += token yield answer await task return EventSourceResponse(completion_iterator(query=query, model_name=model_name, prompt_name=prompt_name), )
[ "langchain.callbacks.AsyncIteratorCallbackHandler", "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate.from_template" ]
[((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", (540, 567), False, 'from fastapi import Body\n'), ((603, 642), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""除了输出之外,还回显输入"""'}), "(False, description='除了输出之外,还回显输入')\n", (607, 642), False, 'from fastapi import Body\n'), ((683, 727), 'fastapi.Body', 'Body', (['LLM_MODELS[0]'], {'description': '"""LLM 模型名称。"""'}), "(LLM_MODELS[0], description='LLM 模型名称。')\n", (687, 727), False, 'from fastapi import Body\n'), ((771, 828), 'fastapi.Body', 'Body', (['TEMPERATURE'], {'description': '"""LLM 采样温度"""', 'ge': '(0.0)', 'le': '(1.0)'}), "(TEMPERATURE, description='LLM 采样温度', ge=0.0, le=1.0)\n", (775, 828), False, 'from fastapi import Body\n'), ((879, 933), 'fastapi.Body', 'Body', (['(1024)'], {'description': '"""限制LLM生成Token数量,默认None代表模型最大值"""'}), "(1024, description='限制LLM生成Token数量,默认None代表模型最大值')\n", (883, 933), False, 'from fastapi import Body\n'), ((1083, 1157), 'fastapi.Body', 'Body', (['"""default"""'], {'description': '"""使用的prompt模板名称(在configs/prompt_config.py中配置)"""'}), "('default', description='使用的prompt模板名称(在configs/prompt_config.py中配置)')\n", (1087, 1157), False, 'from fastapi import Body\n'), ((1664, 1694), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (1692, 1694), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((1802, 1921), 'server.utils.get_OpenAI', 'get_OpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'callbacks': '[callback]', 'echo': 'echo'}), '(model_name=model_name, temperature=temperature, max_tokens=\n max_tokens, callbacks=[callback], echo=echo)\n', (1812, 1921), False, 'from server.utils import wrap_done, get_OpenAI\n'), ((2014, 2060), 'server.utils.get_prompt_template', 'get_prompt_template', (['"""completion"""', 'prompt_name'], {}), "('completion', prompt_name)\n", (2033, 2060), False, 'from server.utils import get_prompt_template\n'), ((2078, 2123), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2106, 2123), False, 'from langchain.prompts import PromptTemplate\n'), ((2140, 2174), 'langchain.chains.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'model'}), '(prompt=prompt, llm=model)\n', (2148, 2174), False, 'from langchain.chains import LLMChain\n')]
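For context, a sketch of how a client might consume the streaming endpoint defined above. The base URL and route path are assumptions (the FastAPI router prefix is not shown in this sample); the JSON field names mirror the Body(...) parameters, and the rest follows standard server-sent-events framing.

# Hypothetical SSE client for the completion endpoint above.
import httpx

def stream_completion(base_url: str = "http://127.0.0.1:7861") -> None:
    payload = {"query": "恼羞成怒", "stream": True}  # field names taken from the Body(...) params
    # The path "/other/completion" is an assumption about how the router is mounted.
    with httpx.stream("POST", f"{base_url}/other/completion", json=payload, timeout=None) as resp:
        for line in resp.iter_lines():
            if line.startswith("data: "):
                print(line[len("data: "):], end="", flush=True)
    print()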
from langchain.llms import Ollama input = input("What is your question?") llm = Ollama(model="llama2") res = llm.predict(input) print (res)
[ "langchain.llms.Ollama" ]
[((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')]
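A small variant sketch of the sample above: predict is the legacy single-shot call; assuming a LangChain version that exposes the Runnable interface (and a local Ollama server with the llama2 model pulled), the same LLM can stream tokens instead.

# Hypothetical streaming variant of the record above.
from langchain.llms import Ollama

llm = Ollama(model="llama2")
question = "Why is the sky blue?"  # illustrative prompt instead of interactive input()
for chunk in llm.stream(question):
    print(chunk, end="", flush=True)
print()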
import os import tempfile from typing import List, Union import streamlit as st import tiktoken from langchain.text_splitter import ( CharacterTextSplitter, RecursiveCharacterTextSplitter, ) from langchain.text_splitter import ( TextSplitter as LCSplitter, ) from langchain.text_splitter import TokenTextSplitter as LCTokenTextSplitter from llama_index import SimpleDirectoryReader from llama_index.node_parser.interface import TextSplitter from llama_index.schema import Document from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter from streamlit.runtime.uploaded_file_manager import UploadedFile DEFAULT_TEXT = "The quick brown fox jumps over the lazy dog." text = st.sidebar.text_area("Enter text", value=DEFAULT_TEXT) uploaded_files = st.sidebar.file_uploader("Upload file", accept_multiple_files=True) type = st.sidebar.radio("Document Type", options=["Text", "Code"]) n_cols = st.sidebar.number_input("Columns", value=2, min_value=1, max_value=3) assert isinstance(n_cols, int) @st.cache_resource(ttl=3600) def load_document(uploaded_files: List[UploadedFile]) -> List[Document]: # Read documents temp_dir = tempfile.TemporaryDirectory() for file in uploaded_files: temp_filepath = os.path.join(temp_dir.name, file.name) with open(temp_filepath, "wb") as f: f.write(file.getvalue()) reader = SimpleDirectoryReader(input_dir=temp_dir.name) return reader.load_data() if uploaded_files: if text != DEFAULT_TEXT: st.warning("Text will be ignored when uploading files") docs = load_document(uploaded_files) text = "\n".join([doc.text for doc in docs]) chunk_size = st.slider( "Chunk Size", value=512, min_value=1, max_value=4096, ) chunk_overlap = st.slider( "Chunk Overlap", value=0, min_value=0, max_value=4096, ) cols = st.columns(n_cols) for ind, col in enumerate(cols): if type == "Text": text_splitter_cls = col.selectbox( "Text Splitter", options=[ "TokenTextSplitter", "SentenceSplitter", "LC:RecursiveCharacterTextSplitter", "LC:CharacterTextSplitter", "LC:TokenTextSplitter", ], index=ind, key=f"splitter_cls_{ind}", ) text_splitter: Union[TextSplitter, LCSplitter] if text_splitter_cls == "TokenTextSplitter": text_splitter = TokenTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) elif text_splitter_cls == "SentenceSplitter": text_splitter = SentenceSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) elif text_splitter_cls == "LC:RecursiveCharacterTextSplitter": text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) elif text_splitter_cls == "LC:CharacterTextSplitter": text_splitter = CharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) elif text_splitter_cls == "LC:TokenTextSplitter": text_splitter = LCTokenTextSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap ) else: raise ValueError("Unknown text splitter") elif type == "Code": text_splitter_cls = col.selectbox("Text Splitter", options=["CodeSplitter"]) if text_splitter_cls == "CodeSplitter": language = col.text_input("Language", value="python") max_chars = col.slider("Max Chars", value=1500) text_splitter = CodeSplitter(language=language, max_chars=max_chars) else: raise ValueError("Unknown text splitter") chunks = text_splitter.split_text(text) tokenizer = tiktoken.get_encoding("gpt2").encode for chunk_ind, chunk in enumerate(chunks): n_tokens = len(tokenizer(chunk)) n_chars = len(chunk) col.text_area( f"Chunk {chunk_ind} - {n_tokens} tokens - {n_chars} chars", chunk, 
key=f"text_area_{ind}_{chunk_ind}", height=500, )
[ "langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder", "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder", "langchain.text_splitter.TokenTextSplitter" ]
[((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_multiple_files': '(True)'}), "('Upload file', accept_multiple_files=True)\n", (814, 857), True, 'import streamlit as st\n'), ((865, 924), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Document Type"""'], {'options': "['Text', 'Code']"}), "('Document Type', options=['Text', 'Code'])\n", (881, 924), True, 'import streamlit as st\n'), ((934, 1003), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Columns"""'], {'value': '(2)', 'min_value': '(1)', 'max_value': '(3)'}), "('Columns', value=2, min_value=1, max_value=3)\n", (957, 1003), True, 'import streamlit as st\n'), ((1038, 1065), 'streamlit.cache_resource', 'st.cache_resource', ([], {'ttl': '(3600)'}), '(ttl=3600)\n', (1055, 1065), True, 'import streamlit as st\n'), ((1692, 1755), 'streamlit.slider', 'st.slider', (['"""Chunk Size"""'], {'value': '(512)', 'min_value': '(1)', 'max_value': '(4096)'}), "('Chunk Size', value=512, min_value=1, max_value=4096)\n", (1701, 1755), True, 'import streamlit as st\n'), ((1791, 1855), 'streamlit.slider', 'st.slider', (['"""Chunk Overlap"""'], {'value': '(0)', 'min_value': '(0)', 'max_value': '(4096)'}), "('Chunk Overlap', value=0, min_value=0, max_value=4096)\n", (1800, 1855), True, 'import streamlit as st\n'), ((1883, 1901), 'streamlit.columns', 'st.columns', (['n_cols'], {}), '(n_cols)\n', (1893, 1901), True, 'import streamlit as st\n'), ((1175, 1204), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1202, 1204), False, 'import tempfile\n'), ((1396, 1442), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'temp_dir.name'}), '(input_dir=temp_dir.name)\n', (1417, 1442), False, 'from llama_index import SimpleDirectoryReader\n'), ((1261, 1299), 'os.path.join', 'os.path.join', (['temp_dir.name', 'file.name'], {}), '(temp_dir.name, file.name)\n', (1273, 1299), False, 'import os\n'), ((1531, 1586), 'streamlit.warning', 'st.warning', (['"""Text will be ignored when uploading files"""'], {}), "('Text will be ignored when uploading files')\n", (1541, 1586), True, 'import streamlit as st\n'), ((3968, 3997), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""gpt2"""'], {}), "('gpt2')\n", (3989, 3997), False, 'import tiktoken\n'), ((2486, 2555), 'llama_index.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (2503, 2555), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((2668, 2736), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (2684, 2736), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((3786, 3838), 'llama_index.text_splitter.CodeSplitter', 'CodeSplitter', ([], {'language': 'language', 'max_chars': 'max_chars'}), '(language=language, max_chars=max_chars)\n', (3798, 3838), False, 'from llama_index.text_splitter import CodeSplitter, SentenceSplitter, TokenTextSplitter\n'), ((2866, 2974), 
'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (2918, 2974), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((3091, 3190), 'langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder', 'CharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (3134, 3190), False, 'from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter\n'), ((3303, 3374), 'langchain.text_splitter.TokenTextSplitter', 'LCTokenTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (3322, 3374), True, 'from langchain.text_splitter import TokenTextSplitter as LCTokenTextSplitter\n')]
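A standalone (non-Streamlit) sketch of the same comparison idea used in the playground above, assuming only the llama_index splitters already imported there; the chunk sizes and sample string are illustrative.

# Hypothetical script comparing two of the splitters exercised above.
from llama_index.text_splitter import SentenceSplitter, TokenTextSplitter

sample = "The quick brown fox jumps over the lazy dog. " * 50
for splitter in (
    TokenTextSplitter(chunk_size=64, chunk_overlap=8),
    SentenceSplitter(chunk_size=64, chunk_overlap=8),
):
    chunks = splitter.split_text(sample)
    print(f"{type(splitter).__name__}: {len(chunks)} chunks")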
import json from langchain.schema import OutputParserException def parse_json_markdown(json_string: str) -> dict: # Remove the triple backticks if present json_string = json_string.strip() start_index = json_string.find("```json") end_index = json_string.find("```", start_index + len("```json")) if start_index != -1 and end_index != -1: extracted_content = json_string[start_index + len("```json"):end_index].strip() # Parse the JSON string into a Python dictionary parsed = json.loads(extracted_content) elif start_index != -1 and end_index == -1 and json_string.endswith("``"): end_index = json_string.find("``", start_index + len("```json")) extracted_content = json_string[start_index + len("```json"):end_index].strip() # Parse the JSON string into a Python dictionary parsed = json.loads(extracted_content) elif json_string.startswith("{"): # Parse the JSON string into a Python dictionary parsed = json.loads(json_string) else: raise Exception("Could not find JSON block in the output.") return parsed def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict: try: json_obj = parse_json_markdown(text) except json.JSONDecodeError as e: raise OutputParserException(f"Got invalid JSON object. Error: {e}") for key in expected_keys: if key not in json_obj: raise OutputParserException( f"Got invalid return object. Expected key `{key}` " f"to be present, but got {json_obj}" ) return json_obj
[ "langchain.schema.OutputParserException" ]
[((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid JSON object. Error: {e}"""'], {}), "(f'Got invalid JSON object. Error: {e}')\n", (1343, 1383), False, 'from langchain.schema import OutputParserException\n'), ((1464, 1581), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"""'], {}), "(\n f'Got invalid return object. Expected key `{key}` to be present, but got {json_obj}'\n )\n", (1485, 1581), False, 'from langchain.schema import OutputParserException\n'), ((1013, 1036), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1023, 1036), False, 'import json\n')]
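A short usage sketch for the two helpers defined above; the fenced payload and expected keys are illustrative.

# Hypothetical usage of parse_json_markdown / parse_and_check_json_markdown.
raw = '''Here is the plan:
```json
{"action": "search", "action_input": "weather in Berlin"}
```'''
parsed = parse_and_check_json_markdown(raw, ["action", "action_input"])
print(parsed["action"])        # -> search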
# From project chatglm-langchain from langchain.document_loaders import UnstructuredFileLoader from langchain.text_splitter import CharacterTextSplitter import re from typing import List class ChineseTextSplitter(CharacterTextSplitter): def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs): super().__init__(**kwargs) self.pdf = pdf self.sentence_size = sentence_size def split_text1(self, text: str) -> List[str]: if self.pdf: text = re.sub(r"\n{3,}", "\n", text) text = re.sub('\s', ' ', text) text = text.replace("\n\n", "") sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))') # del :; sent_list = [] for ele in sent_sep_pattern.split(text): if sent_sep_pattern.match(ele) and sent_list: sent_list[-1] += ele elif ele: sent_list.append(ele) return sent_list def split_text(self, text: str) -> List[str]: ##此处需要进一步优化逻辑 if self.pdf: text = re.sub(r"\n{3,}", r"\n", text) text = re.sub('\s', " ", text) text = re.sub("\n\n", "", text) text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text) # 单字符断句符 text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text) # 英文省略号 text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text) # 中文省略号 text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text) # 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\n放到双引号后,注意前面的几句都小心保留了双引号 text = text.rstrip() # 段尾如果有多余的\n就去掉它 # 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。 ls = [i for i in text.split("\n") if i] for ele in ls: if len(ele) > self.sentence_size: ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele) ele1_ls = ele1.split("\n") for ele_ele1 in ele1_ls: if len(ele_ele1) > self.sentence_size: ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1) ele2_ls = ele_ele2.split("\n") for ele_ele2 in ele2_ls: if len(ele_ele2) > self.sentence_size: ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2) ele2_id = ele2_ls.index(ele_ele2) ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[ ele2_id + 1:] ele_id = ele1_ls.index(ele_ele1) ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:] id = ls.index(ele) ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:] return ls def load_file(filepath, sentence_size): loader = UnstructuredFileLoader(filepath, mode="elements") textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size) docs = loader.load_and_split(text_splitter=textsplitter) # write_check_file(filepath, docs) return docs
[ "langchain.document_loaders.UnstructuredFileLoader" ]
[((3017, 3066), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['filepath'], {'mode': '"""elements"""'}), "(filepath, mode='elements')\n", (3039, 3066), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((657, 714), 're.compile', 're.compile', (['"""([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))"""'], {}), '(\'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))\')\n', (667, 714), False, 'import re\n'), ((1218, 1269), 're.sub', 're.sub', (['"""([;;.!?。!?\\\\?])([^”’])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), "('([;;.!?。!?\\\\?])([^”’])', '\\\\1\\\\n\\\\2', text)\n", (1224, 1269), False, 'import re\n'), ((1293, 1340), 're.sub', 're.sub', (['"""(\\\\.{6})([^"’”」』])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'(\\\\.{6})([^"’”」』])\', \'\\\\1\\\\n\\\\2\', text)\n', (1299, 1340), False, 'import re\n'), ((1363, 1410), 're.sub', 're.sub', (['"""(\\\\…{2})([^"’”」』])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'(\\\\…{2})([^"’”」』])\', \'\\\\1\\\\n\\\\2\', text)\n', (1369, 1410), False, 'import re\n'), ((1433, 1504), 're.sub', 're.sub', (['"""([;;!?。!?\\\\?]["’”」』]{0,2})([^;;!?,。!?\\\\?])"""', '"""\\\\1\\\\n\\\\2"""', 'text'], {}), '(\'([;;!?。!?\\\\?]["’”」』]{0,2})([^;;!?,。!?\\\\?])\', \'\\\\1\\\\n\\\\2\', text)\n', (1439, 1504), False, 'import re\n'), ((513, 542), 're.sub', 're.sub', (['"""\\\\n{3,}"""', '"""\n"""', 'text'], {}), "('\\\\n{3,}', '\\n', text)\n", (519, 542), False, 'import re\n'), ((562, 586), 're.sub', 're.sub', (['"""\\\\s"""', '""" """', 'text'], {}), "('\\\\s', ' ', text)\n", (568, 586), False, 'import re\n'), ((1084, 1114), 're.sub', 're.sub', (['"""\\\\n{3,}"""', '"""\\\\n"""', 'text'], {}), "('\\\\n{3,}', '\\\\n', text)\n", (1090, 1114), False, 'import re\n'), ((1134, 1158), 're.sub', 're.sub', (['"""\\\\s"""', '""" """', 'text'], {}), "('\\\\s', ' ', text)\n", (1140, 1158), False, 'import re\n'), ((1177, 1201), 're.sub', 're.sub', (['"""\n\n"""', '""""""', 'text'], {}), "('\\n\\n', '', text)\n", (1183, 1201), False, 'import re\n'), ((1816, 1871), 're.sub', 're.sub', (['"""([,,.]["’”」』]{0,2})([^,,.])"""', '"""\\\\1\\\\n\\\\2"""', 'ele'], {}), '(\'([,,.]["’”」』]{0,2})([^,,.])\', \'\\\\1\\\\n\\\\2\', ele)\n', (1822, 1871), False, 'import re\n'), ((2049, 2119), 're.sub', 're.sub', (['"""([\\\\n]{1,}| {2,}["’”」』]{0,2})([^\\\\s])"""', '"""\\\\1\\\\n\\\\2"""', 'ele_ele1'], {}), '(\'([\\\\n]{1,}| {2,}["’”」』]{0,2})([^\\\\s])\', \'\\\\1\\\\n\\\\2\', ele_ele1)\n', (2055, 2119), False, 'import re\n'), ((2331, 2385), 're.sub', 're.sub', (['"""( ["’”」』]{0,2})([^ ])"""', '"""\\\\1\\\\n\\\\2"""', 'ele_ele2'], {}), '(\'( ["’”」』]{0,2})([^ ])\', \'\\\\1\\\\n\\\\2\', ele_ele2)\n', (2337, 2385), False, 'import re\n')]
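A brief direct-use sketch for the splitter above, without going through load_file. Note that sentence_size defaults to None, which would break the length comparisons inside split_text, so the value 100 here is an illustrative requirement rather than a documented default.

# Hypothetical direct use of ChineseTextSplitter on an in-memory string.
splitter = ChineseTextSplitter(pdf=False, sentence_size=100)
sample = "今天天气很好。我们一起去公园散步吧!你觉得怎么样?这是一个简单的分句测试。"
for sentence in splitter.split_text(sample):
    print(sentence)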
import os import uuid from typing import Any, Dict, List, Optional, Tuple from langchain.agents.agent import RunnableAgent from langchain.agents.tools import tool as LangChainTool from langchain.memory import ConversationSummaryMemory from langchain.tools.render import render_text_description from langchain_core.agents import AgentAction from langchain_core.callbacks import BaseCallbackHandler from langchain_openai import ChatOpenAI from pydantic import ( UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator, ) from pydantic_core import PydanticCustomError from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler from crewai.utilities import I18N, Logger, Prompts, RPMController from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess class Agent(BaseModel): """Represents an agent in a system. Each agent has a role, a goal, a backstory, and an optional language model (llm). The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents. Attributes: agent_executor: An instance of the CrewAgentExecutor class. role: The role of the agent. goal: The objective of the agent. backstory: The backstory of the agent. config: Dict representation of agent configuration. llm: The language model that will run the agent. function_calling_llm: The language model that will the tool calling for this agent, it overrides the crew function_calling_llm. max_iter: Maximum number of iterations for an agent to execute a task. memory: Whether the agent should have memory or not. max_rpm: Maximum number of requests per minute for the agent execution to be respected. verbose: Whether the agent execution should be in verbose mode. allow_delegation: Whether the agent is allowed to delegate tasks to other agents. tools: Tools at agents disposal step_callback: Callback to be executed after each step of the agent execution. 
callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process """ __hash__ = object.__hash__ # type: ignore _logger: Logger = PrivateAttr() _rpm_controller: RPMController = PrivateAttr(default=None) _request_within_rpm_limit: Any = PrivateAttr(default=None) _token_process: TokenProcess = TokenProcess() formatting_errors: int = 0 model_config = ConfigDict(arbitrary_types_allowed=True) id: UUID4 = Field( default_factory=uuid.uuid4, frozen=True, description="Unique identifier for the object, not set by user.", ) role: str = Field(description="Role of the agent") goal: str = Field(description="Objective of the agent") backstory: str = Field(description="Backstory of the agent") config: Optional[Dict[str, Any]] = Field( description="Configuration for the agent", default=None, ) max_rpm: Optional[int] = Field( default=None, description="Maximum number of requests per minute for the agent execution to be respected.", ) memory: bool = Field( default=False, description="Whether the agent should have memory or not" ) verbose: bool = Field( default=False, description="Verbose mode for the Agent Execution" ) allow_delegation: bool = Field( default=True, description="Allow delegation of tasks to agents" ) tools: Optional[List[Any]] = Field( default_factory=list, description="Tools at agents disposal" ) max_iter: Optional[int] = Field( default=15, description="Maximum iterations for an agent to execute a task" ) agent_executor: InstanceOf[CrewAgentExecutor] = Field( default=None, description="An instance of the CrewAgentExecutor class." ) tools_handler: InstanceOf[ToolsHandler] = Field( default=None, description="An instance of the ToolsHandler class." ) cache_handler: InstanceOf[CacheHandler] = Field( default=CacheHandler(), description="An instance of the CacheHandler class." 
) step_callback: Optional[Any] = Field( default=None, description="Callback to be executed after each step of the agent execution.", ) i18n: I18N = Field(default=I18N(), description="Internationalization settings.") llm: Any = Field( default_factory=lambda: ChatOpenAI( model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4") ), description="Language model that will run the agent.", ) function_calling_llm: Optional[Any] = Field( description="Language model that will run the agent.", default=None ) callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field( default=None, description="Callback to be executed" ) def __init__(__pydantic_self__, **data): config = data.pop("config", {}) super().__init__(**config, **data) @field_validator("id", mode="before") @classmethod def _deny_user_set_id(cls, v: Optional[UUID4]) -> None: if v: raise PydanticCustomError( "may_not_set_field", "This field is not to be set by the user.", {} ) @model_validator(mode="after") def set_attributes_based_on_config(self) -> "Agent": """Set attributes based on the agent configuration.""" if self.config: for key, value in self.config.items(): setattr(self, key, value) return self @model_validator(mode="after") def set_private_attrs(self): """Set private attributes.""" self._logger = Logger(self.verbose) if self.max_rpm and not self._rpm_controller: self._rpm_controller = RPMController( max_rpm=self.max_rpm, logger=self._logger ) return self @model_validator(mode="after") def set_agent_executor(self) -> "Agent": """set agent executor is set.""" if hasattr(self.llm, "model_name"): self.llm.callbacks = [ TokenCalcHandler(self.llm.model_name, self._token_process) ] if not self.agent_executor: self.set_cache_handler(self.cache_handler) return self def execute_task( self, task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None, ) -> str: """Execute a task with the agent. Args: task: Task to execute. context: Context to execute the task in. tools: Tools to use for the task. Returns: Output of the agent """ self.tools_handler.last_used_tool = {} task_prompt = task.prompt() if context: task_prompt = self.i18n.slice("task_with_context").format( task=task_prompt, context=context ) tools = self._parse_tools(tools or self.tools) self.create_agent_executor(tools=tools) self.agent_executor.tools = tools self.agent_executor.task = task self.agent_executor.tools_description = render_text_description(tools) self.agent_executor.tools_names = self.__tools_names(tools) result = self.agent_executor.invoke( { "input": task_prompt, "tool_names": self.agent_executor.tools_names, "tools": self.agent_executor.tools_description, } )["output"] if self.max_rpm: self._rpm_controller.stop_rpm_counter() return result def set_cache_handler(self, cache_handler: CacheHandler) -> None: """Set the cache handler for the agent. Args: cache_handler: An instance of the CacheHandler class. """ self.cache_handler = cache_handler self.tools_handler = ToolsHandler(cache=self.cache_handler) self.create_agent_executor() def set_rpm_controller(self, rpm_controller: RPMController) -> None: """Set the rpm controller for the agent. Args: rpm_controller: An instance of the RPMController class. """ if not self._rpm_controller: self._rpm_controller = rpm_controller self.create_agent_executor() def create_agent_executor(self, tools=None) -> None: """Create an agent executor for the agent. Returns: An instance of the CrewAgentExecutor class. 
""" tools = tools or self.tools agent_args = { "input": lambda x: x["input"], "tools": lambda x: x["tools"], "tool_names": lambda x: x["tool_names"], "agent_scratchpad": lambda x: self.format_log_to_str( x["intermediate_steps"] ), } executor_args = { "llm": self.llm, "i18n": self.i18n, "tools": self._parse_tools(tools), "verbose": self.verbose, "handle_parsing_errors": True, "max_iterations": self.max_iter, "step_callback": self.step_callback, "tools_handler": self.tools_handler, "function_calling_llm": self.function_calling_llm, "callbacks": self.callbacks, } if self._rpm_controller: executor_args[ "request_within_rpm_limit" ] = self._rpm_controller.check_or_wait if self.memory: summary_memory = ConversationSummaryMemory( llm=self.llm, input_key="input", memory_key="chat_history" ) executor_args["memory"] = summary_memory agent_args["chat_history"] = lambda x: x["chat_history"] prompt = Prompts(i18n=self.i18n, tools=tools).task_execution_with_memory() else: prompt = Prompts(i18n=self.i18n, tools=tools).task_execution() execution_prompt = prompt.partial( goal=self.goal, role=self.role, backstory=self.backstory, ) bind = self.llm.bind(stop=[self.i18n.slice("observation")]) inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self) self.agent_executor = CrewAgentExecutor( agent=RunnableAgent(runnable=inner_agent), **executor_args ) def interpolate_inputs(self, inputs: Dict[str, Any]) -> None: """Interpolate inputs into the agent description and backstory.""" if inputs: self.role = self.role.format(**inputs) self.goal = self.goal.format(**inputs) self.backstory = self.backstory.format(**inputs) def increment_formatting_errors(self) -> None: """Count the formatting errors of the agent.""" self.formatting_errors += 1 def format_log_to_str( self, intermediate_steps: List[Tuple[AgentAction, str]], observation_prefix: str = "Observation: ", llm_prefix: str = "", ) -> str: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}" return thoughts def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]: """Parse tools to be used for the task.""" # tentatively try to import from crewai_tools import BaseTool as CrewAITool tools_list = [] try: from crewai_tools import BaseTool as CrewAITool for tool in tools: if isinstance(tool, CrewAITool): tools_list.append(tool.to_langchain()) else: tools_list.append(tool) except ModuleNotFoundError: for tool in tools: tools_list.append(tool) return tools_list @staticmethod def __tools_names(tools) -> str: return ", ".join([t.name for t in tools]) def __repr__(self): return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
[ "langchain.tools.render.render_text_description", "langchain.agents.agent.RunnableAgent", "langchain.memory.ConversationSummaryMemory" ]
[((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2454, 2468), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2506, 2531), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2517, 2531), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2567, 2581), 'crewai.utilities.token_counter_callback.TokenProcess', 'TokenProcess', ([], {}), '()\n', (2579, 2581), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((2633, 2673), 'pydantic.ConfigDict', 'ConfigDict', ([], {'arbitrary_types_allowed': '(True)'}), '(arbitrary_types_allowed=True)\n', (2643, 2673), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2690, 2807), 'pydantic.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'frozen': '(True)', 'description': '"""Unique identifier for the object, not set by user."""'}), "(default_factory=uuid.uuid4, frozen=True, description=\n 'Unique identifier for the object, not set by user.')\n", (2695, 2807), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2850, 2888), 'pydantic.Field', 'Field', ([], {'description': '"""Role of the agent"""'}), "(description='Role of the agent')\n", (2855, 2888), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2905, 2948), 'pydantic.Field', 'Field', ([], {'description': '"""Objective of the agent"""'}), "(description='Objective of the agent')\n", (2910, 2948), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2970, 3013), 'pydantic.Field', 'Field', ([], {'description': '"""Backstory of the agent"""'}), "(description='Backstory of the agent')\n", (2975, 3013), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3053, 3115), 'pydantic.Field', 'Field', ([], {'description': '"""Configuration for the agent"""', 'default': 'None'}), "(description='Configuration for the agent', default=None)\n", (3058, 3115), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3168, 3291), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Maximum number of requests per minute for the agent execution to be respected."""'}), "(default=None, description=\n 'Maximum number of requests per minute for the agent execution to be respected.'\n )\n", (3173, 3291), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3324, 3403), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the agent should have memory or not"""'}), "(default=False, description='Whether the agent should have memory or not')\n", (3329, 3403), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, 
PrivateAttr, field_validator, model_validator\n'), ((3438, 3510), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Verbose mode for the Agent Execution"""'}), "(default=False, description='Verbose mode for the Agent Execution')\n", (3443, 3510), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3554, 3624), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Allow delegation of tasks to agents"""'}), "(default=True, description='Allow delegation of tasks to agents')\n", (3559, 3624), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3672, 3739), 'pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Tools at agents disposal"""'}), "(default_factory=list, description='Tools at agents disposal')\n", (3677, 3739), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3784, 3871), 'pydantic.Field', 'Field', ([], {'default': '(15)', 'description': '"""Maximum iterations for an agent to execute a task"""'}), "(default=15, description=\n 'Maximum iterations for an agent to execute a task')\n", (3789, 3871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3933, 4011), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the CrewAgentExecutor class."""'}), "(default=None, description='An instance of the CrewAgentExecutor class.')\n", (3938, 4011), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4072, 4145), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the ToolsHandler class."""'}), "(default=None, description='An instance of the ToolsHandler class.')\n", (4077, 4145), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4339, 4442), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed after each step of the agent execution."""'}), "(default=None, description=\n 'Callback to be executed after each step of the agent execution.')\n", (4344, 4442), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4797, 4871), 'pydantic.Field', 'Field', ([], {'description': '"""Language model that will run the agent."""', 'default': 'None'}), "(description='Language model that will run the agent.', default=None)\n", (4802, 4871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4951, 5009), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed"""'}), "(default=None, description='Callback to be executed')\n", (4956, 5009), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5159, 5195), 'pydantic.field_validator', 'field_validator', (['"""id"""'], {'mode': '"""before"""'}), "('id', mode='before')\n", (5174, 5195), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5430, 5459), 'pydantic.model_validator', 
'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5445, 5459), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5723, 5752), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5738, 5752), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((6070, 6099), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (6085, 6099), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5847, 5867), 'crewai.utilities.Logger', 'Logger', (['self.verbose'], {}), '(self.verbose)\n', (5853, 5867), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((7345, 7375), 'langchain.tools.render.render_text_description', 'render_text_description', (['tools'], {}), '(tools)\n', (7368, 7375), False, 'from langchain.tools.render import render_text_description\n'), ((8088, 8126), 'crewai.agents.ToolsHandler', 'ToolsHandler', ([], {'cache': 'self.cache_handler'}), '(cache=self.cache_handler)\n', (8100, 8126), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4229, 4243), 'crewai.agents.CacheHandler', 'CacheHandler', ([], {}), '()\n', (4241, 4243), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4492, 4498), 'crewai.utilities.I18N', 'I18N', ([], {}), '()\n', (4496, 4498), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((5305, 5397), 'pydantic_core.PydanticCustomError', 'PydanticCustomError', (['"""may_not_set_field"""', '"""This field is not to be set by the user."""', '{}'], {}), "('may_not_set_field',\n 'This field is not to be set by the user.', {})\n", (5324, 5397), False, 'from pydantic_core import PydanticCustomError\n'), ((5957, 6013), 'crewai.utilities.RPMController', 'RPMController', ([], {'max_rpm': 'self.max_rpm', 'logger': 'self._logger'}), '(max_rpm=self.max_rpm, logger=self._logger)\n', (5970, 6013), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((9715, 9805), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'self.llm', 'input_key': '"""input"""', 'memory_key': '"""chat_history"""'}), "(llm=self.llm, input_key='input', memory_key=\n 'chat_history')\n", (9740, 9805), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((10407, 10434), 'crewai.agents.CrewAgentParser', 'CrewAgentParser', ([], {'agent': 'self'}), '(agent=self)\n', (10422, 10434), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((6281, 6339), 'crewai.utilities.token_counter_callback.TokenCalcHandler', 'TokenCalcHandler', (['self.llm.model_name', 'self._token_process'], {}), '(self.llm.model_name, self._token_process)\n', (6297, 6339), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((10502, 10537), 'langchain.agents.agent.RunnableAgent', 'RunnableAgent', ([], {'runnable': 'inner_agent'}), '(runnable=inner_agent)\n', (10515, 10537), False, 'from langchain.agents.agent import RunnableAgent\n'), ((9974, 10010), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (9981, 10010), False, 
'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((10075, 10111), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (10082, 10111), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((4630, 4674), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MODEL_NAME"""', '"""gpt-4"""'], {}), "('OPENAI_MODEL_NAME', 'gpt-4')\n", (4644, 4674), False, 'import os\n')]
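A minimal, self-contained sketch (not part of the dataset row above) illustrating the langchain APIs listed for this entry: render_text_description to turn a tool list into a prompt fragment, Tool for wrapping a callable, and ConversationSummaryMemory keyed the same way create_agent_executor wires it. The echo tool, its description, and the model name are illustrative assumptions, the ChatOpenAI import path is assumed, and an OPENAI_API_KEY is assumed to be present in the environment.

from langchain.agents.tools import Tool
from langchain.chat_models import ChatOpenAI  # assumed entry point; requires OPENAI_API_KEY
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description


def echo(text: str) -> str:
    """Toy tool body used only for illustration."""
    return text


tools = [Tool(name="Echo", func=echo, description="Repeats the input back.")]

# Prompt fragment such as "Echo: Repeats the input back.", analogous to
# the tools_description string the Agent entry above passes to its executor.
tools_description = render_text_description(tools)

# Summary memory wired with the same input/memory keys as the entry above.
memory = ConversationSummaryMemory(
    llm=ChatOpenAI(model="gpt-3.5-turbo"),  # illustrative model choice
    input_key="input",
    memory_key="chat_history",
)
print(tools_description)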
import re from typing import Union from langchain.agents.mrkl.output_parser import MRKLOutputParser from langchain.schema import AgentAction, AgentFinish, OutputParserException FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task. Question: the input question you must answer Thought: you should always think about what to do Action: Exactly only one word out of: {tool_names} Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question""" FORMAT_INSTRUCTIONS = """List of tools, use exactly one word when choosing Action: {tool_names} Only user asks a question, not you. For example user might ask: What is the latest news? Here is an example sequence you can follow: Thought: I should search online for the latest news. Action: Search Action Input: What is the latest news? Observation: X is going away. Z is again happening. Thought: That is interesting, I should search for more information about X and Z and also search about Q. Action: Search Action Input: How is X impacting things. Why is Z happening again, and what are the consequences? Observation: X is causing Y. Z may be caused by P and will lead to H. Thought: I now know the final answer Final Answer: The latest news is: * X is going away, and this is caused by Y. * Z is happening again, and the cause is P and will lead to H. Overall, X and Z are important problems. """ FORMAT_INSTRUCTIONS_PYTHON = """List of tools, use exactly one word when choosing Action: {tool_names} Only user asks a question, not you. For example user might ask: How many rows are in the dataset? Here is an example sequence you can follow. You can repeat Thoughts, but as soon as possible you should try to answer the original user question. Once you an answer the user question, just say: Thought: I now know the final answer Thought: I should use python_repl_ast tool. Action: python_repl_ast Action Input: df.shape Observation: (25, 10) Thought: I now know the final answer Final Answer: There are 25 rows in the dataset. 
""" FINAL_ANSWER_ACTION = "Final Answer:" MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = ( "Invalid Format: Missing 'Action:' after 'Thought:" ) MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = ( "Invalid Format: Missing 'Action Input:' after 'Action:'" ) FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = ( "Parsing LLM output produced both a final answer and a parse-able action:" ) class H2OMRKLOutputParser(MRKLOutputParser): """MRKL Output parser for the chat agent.""" def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS def parse(self, text: str) -> Union[AgentAction, AgentFinish]: includes_answer = FINAL_ANSWER_ACTION in text regex = ( r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" ) action_match = re.search(regex, text, re.DOTALL) if includes_answer: return AgentFinish( {"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text ) elif action_match: action = action_match.group(1).strip() action_input = action_match.group(2) tool_input = action_input.strip(" ") # ensure if its a well formed SQL query we don't remove any trailing " chars if tool_input.startswith("SELECT ") is False: tool_input = tool_input.strip('"') return AgentAction(action, tool_input, text) if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL): raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text, send_to_llm=True, ) elif not re.search( r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL ): raise OutputParserException( f"Could not parse LLM output: `{text}`", observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text, send_to_llm=True, ) else: raise OutputParserException(f"Could not parse LLM output: `{text}`") @property def _type(self) -> str: return "mrkl" class H2OPythonMRKLOutputParser(H2OMRKLOutputParser): def get_format_instructions(self) -> str: return FORMAT_INSTRUCTIONS_PYTHON
[ "langchain.schema.AgentAction", "langchain.schema.OutputParserException" ]
[((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DOTALL)\n", (3698, 3749), False, 'import re\n'), ((3766, 3928), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text,\n send_to_llm=True)\n", (3787, 3928), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((3635, 3672), 'langchain.schema.AgentAction', 'AgentAction', (['action', 'tool_input', 'text'], {}), '(action, tool_input, text)\n', (3646, 3672), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4016, 4103), 're.search', 're.search', (['"""[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)"""', 'text', 're.DOTALL'], {}), "('[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)', text, re.\n DOTALL)\n", (4025, 4103), False, 'import re\n'), ((4133, 4300), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text,\n send_to_llm=True)\n", (4154, 4300), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4403, 4465), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {}), "(f'Could not parse LLM output: `{text}`')\n", (4424, 4465), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')]
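A short, hedged usage sketch of the langchain.schema objects the parser row above returns. The toy_parse helper and its regex are deliberate simplifications of H2OMRKLOutputParser.parse, not the class itself; they only demonstrate AgentAction for a tool call, AgentFinish for a final answer, and OutputParserException when neither pattern is found.

import re
from typing import Union

from langchain.schema import AgentAction, AgentFinish, OutputParserException


def toy_parse(text: str) -> Union[AgentAction, AgentFinish]:
    """Simplified stand-in for H2OMRKLOutputParser.parse (illustration only)."""
    if "Final Answer:" in text:
        return AgentFinish({"output": text.split("Final Answer:")[-1].strip()}, text)
    match = re.search(
        r"Action\s*:[\s]*(.*?)[\s]*Action\s*Input\s*:[\s]*(.*)", text, re.DOTALL
    )
    if match:
        return AgentAction(match.group(1).strip(), match.group(2).strip(), text)
    raise OutputParserException(f"Could not parse LLM output: `{text}`")


# A well-formed tool call parses to an AgentAction ...
print(toy_parse("Thought: I should search.\nAction: Search\nAction Input: latest news"))
# ... and a final answer parses to an AgentFinish.
print(toy_parse("Thought: I now know the final answer\nFinal Answer: 42"))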
import os import re import uuid import cv2 import torch import requests import io, base64 import numpy as np import gradio as gr from PIL import Image from omegaconf import OmegaConf from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation from langchain.agents.initialize import initialize_agent from langchain.agents.tools import Tool from langchain.chains.conversation.memory import ConversationBufferMemory from langchain.llms.openai import OpenAI VISUAL_CHATGPT_PREFIX = """Visual ChatGPT is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Visual ChatGPT is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. Visual ChatGPT is able to process and understand large amounts of text and images. As a language model, Visual ChatGPT can not directly read images, but it has a list of tools to finish different visual tasks. Each image will have a file name formed as "image/xxx.png", and Visual ChatGPT can invoke different tools to indirectly understand pictures. When talking about images, Visual ChatGPT is very strict to the file name and will never fabricate nonexistent files. When using tools to generate new image files, Visual ChatGPT is also known that the image may not be the same as the user's demand, and will use other visual question answering tools or description tools to observe the real image. Visual ChatGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the image content and image file name. It will remember to provide the file name from the last tool observation, if a new image is generated. Human may provide new figures to Visual ChatGPT with a description. The description helps Visual ChatGPT to understand this image, but Visual ChatGPT should use tools to finish following tasks, rather than directly imagine from the description. Overall, Visual ChatGPT is a powerful visual dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. TOOLS: ------ Visual ChatGPT has access to the following tools:""" VISUAL_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format: ``` Thought: Do I need to use a tool? Yes Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ``` When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format: ``` Thought: Do I need to use a tool? No {ai_prefix}: [your response here] ``` """ VISUAL_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if it does not exist. You will remember to provide the image file name loyally if it's provided in the last tool observation. Begin! Previous conversation history: {chat_history} New input: {input} Since Visual ChatGPT is a text language model, Visual ChatGPT must use tools to observe images rather than imagination. 
The thoughts and observations are only visible for Visual ChatGPT, Visual ChatGPT should remember to repeat important information in the final response for Human. Thought: Do I need to use a tool? {agent_scratchpad}""" ENDPOINT = "http://localhost:7860" T2IAPI = ENDPOINT + "/controlnet/txt2img" DETECTAPI = ENDPOINT + "/controlnet/detect" MODELLIST = ENDPOINT + "/controlnet/model_list" device = "cpu" if torch.cuda.is_available(): device = "cuda" def readImage(path): img = cv2.imread(path) retval, buffer = cv2.imencode('.jpg', img) b64img = base64.b64encode(buffer).decode("utf-8") return b64img def get_model(pattern='^control_canny.*'): r = requests.get(MODELLIST) result = r.json()["model_list"] for item in result: if re.match(pattern, item): return item def do_webui_request(url=T2IAPI, **kwargs): reqbody = { "prompt": "best quality, extremely detailed", "negative_prompt": "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality", "seed": -1, "subseed": -1, "subseed_strength": 0, "batch_size": 1, "n_iter": 1, "steps": 15, "cfg_scale": 7, "width": 512, "height": 768, "restore_faces": True, "eta": 0, "sampler_index": "Euler a", "controlnet_input_images": [], "controlnet_module": 'canny', "controlnet_model": 'control_canny-fp16 [e3fe7712]', "controlnet_guidance": 1.0, } reqbody.update(kwargs) r = requests.post(url, json=reqbody) return r.json() def cut_dialogue_history(history_memory, keep_last_n_words=500): tokens = history_memory.split() n_tokens = len(tokens) print(f"hitory_memory:{history_memory}, n_tokens: {n_tokens}") if n_tokens < keep_last_n_words: return history_memory else: paragraphs = history_memory.split('\n') last_n_tokens = n_tokens while last_n_tokens >= keep_last_n_words: last_n_tokens = last_n_tokens - len(paragraphs[0].split(' ')) paragraphs = paragraphs[1:] return '\n' + '\n'.join(paragraphs) def get_new_image_name(org_img_name, func_name="update"): head_tail = os.path.split(org_img_name) head = head_tail[0] tail = head_tail[1] name_split = tail.split('.')[0].split('_') this_new_uuid = str(uuid.uuid4())[0:4] if len(name_split) == 1: most_org_file_name = name_split[0] recent_prev_file_name = name_split[0] new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name) else: assert len(name_split) == 4 most_org_file_name = name_split[3] recent_prev_file_name = name_split[0] new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name) return os.path.join(head, new_file_name) class MaskFormer: def __init__(self, device): self.device = device self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") self.model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device) def inference(self, image_path, text): threshold = 0.5 min_area = 0.02 padding = 20 original_image = Image.open(image_path) image = original_image.resize((512, 512)) inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt",).to(self.device) with torch.no_grad(): outputs = self.model(**inputs) mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1]) if area_ratio < min_area: return None true_indices = np.argwhere(mask) mask_array = np.zeros_like(mask, dtype=bool) for idx in true_indices: padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx) mask_array[padded_slice] = True 
visual_mask = (mask_array * 255).astype(np.uint8) image_mask = Image.fromarray(visual_mask) return image_mask.resize(image.size) # class ImageEditing: # def __init__(self, device): # print("Initializing StableDiffusionInpaint to %s" % device) # self.device = device # self.mask_former = MaskFormer(device=self.device) # # self.inpainting = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting",).to(device) # def remove_part_of_image(self, input): # image_path, to_be_removed_txt = input.split(",") # print(f'remove_part_of_image: to_be_removed {to_be_removed_txt}') # return self.replace_part_of_image(f"{image_path},{to_be_removed_txt},background") # def replace_part_of_image(self, input): # image_path, to_be_replaced_txt, replace_with_txt = input.split(",") # print(f'replace_part_of_image: replace_with_txt {replace_with_txt}') # mask_image = self.mask_former.inference(image_path, to_be_replaced_txt) # buffered = io.BytesIO() # mask_image.save(buffered, format="JPEG") # resp = do_webui_request( # url=ENDPOINT + "/sdapi/v1/img2img", # init_images=[readImage(image_path)], # mask=base64.b64encode(buffered.getvalue()).decode("utf-8"), # prompt=replace_with_txt, # ) # updated_image_path = get_new_image_name(image_path, func_name="replace-something") # with open(updated_image_path, 'wb') as f: # f.write(base64.b64decode(resp['images'][0])) # return updated_image_path # class Pix2Pix: # def __init__(self, device): # print("Initializing Pix2Pix to %s" % device) # self.device = device # self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None).to(device) # self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config) # def inference(self, inputs): # """Change style of image.""" # print("===>Starting Pix2Pix Inference") # image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) # original_image = Image.open(image_path) # image = self.pipe(instruct_text,image=original_image,num_inference_steps=40,image_guidance_scale=1.2,).images[0] # updated_image_path = get_new_image_name(image_path, func_name="pix2pix") # image.save(updated_image_path) # return updated_image_path class T2I: def __init__(self, device): print("Initializing T2I to %s" % device) self.device = device self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion") self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion") self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device) def inference(self, text): image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"] print(f'{text} refined to {refined_text}') resp = do_webui_request( url=ENDPOINT + "/sdapi/v1/txt2img", prompt=refined_text, ) with open(image_filename, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}") return image_filename class ImageCaptioning: def __init__(self, device): print("Initializing ImageCaptioning to %s" % device) self.device = device self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device) def inference(self, image_path): 
inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device) out = self.model.generate(**inputs) captions = self.processor.decode(out[0], skip_special_tokens=True) return captions class image2canny: def inference(self, inputs): print("===>Starting image2canny Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="segmentation", ) updated_image_path = get_new_image_name(inputs, func_name="edge") image.save(updated_image_path) return updated_image_path class canny2image: def inference(self, inputs): print("===>Starting canny2image Inference") image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="none", controlnet_model=get_model(pattern='^control_canny.*'), ) updated_image_path = get_new_image_name(image_path, func_name="canny2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class image2line: def inference(self, inputs): print("===>Starting image2hough Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="mlsd", ) updated_image_path = get_new_image_name(inputs, func_name="line-of") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class line2image: def inference(self, inputs): print("===>Starting line2image Inference") image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="none", controlnet_model=get_model(pattern='^control_mlsd.*'), ) updated_image_path = get_new_image_name(image_path, func_name="line2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class image2hed: def inference(self, inputs): print("===>Starting image2hed Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="hed", ) updated_image_path = get_new_image_name(inputs, func_name="hed-boundary") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class hed2image: def inference(self, inputs): print("===>Starting hed2image Inference") resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="none", controlnet_model=get_model(pattern='^control_hed.*'), ) updated_image_path = get_new_image_name(image_path, func_name="hed2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class image2scribble: def inference(self, inputs): print("===>Starting image2scribble Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="scribble", ) updated_image_path = get_new_image_name(inputs, func_name="scribble") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class scribble2image: def inference(self, inputs): print("===>Starting seg2image Inference") image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="none", 
controlnet_model=get_model(pattern='^control_scribble.*'), ) updated_image_path = get_new_image_name(image_path, func_name="scribble2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class image2pose: def inference(self, inputs): print("===>Starting image2pose Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="openpose", ) updated_image_path = get_new_image_name(inputs, func_name="human-pose") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class pose2image: def inference(self, inputs): print("===>Starting pose2image Inference") image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="none", controlnet_model=get_model(pattern='^control_openpose.*'), ) updated_image_path = get_new_image_name(image_path, func_name="pose2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class image2seg: def inference(self, inputs): print("===>Starting image2seg Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="segmentation", ) updated_image_path = get_new_image_name(inputs, func_name="segmentation") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class seg2image: def inference(self, inputs): print("===>Starting seg2image Inference") image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="none", controlnet_model=get_model(pattern='^control_seg.*'), ) updated_image_path = get_new_image_name(image_path, func_name="segment2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class image2depth: def inference(self, inputs): print("===>Starting image2depth Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="depth", ) updated_image_path = get_new_image_name(inputs, func_name="depth") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class depth2image: def inference(self, inputs): print("===>Starting depth2image Inference") image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="depth", controlnet_model=get_model(pattern='^control_depth.*'), ) updated_image_path = get_new_image_name(image_path, func_name="depth2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class image2normal: def inference(self, inputs): print("===>Starting image2 normal Inference") resp = do_webui_request( url=DETECTAPI, controlnet_input_images=[readImage(inputs)], controlnet_module="normal", ) updated_image_path = get_new_image_name(inputs, func_name="normal-map") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class normal2image: def inference(self, inputs): print("===>Starting normal2image Inference") 
image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) resp = do_webui_request( prompt=instruct_text, controlnet_input_images=[readImage(image_path)], controlnet_module="normal", controlnet_model=get_model(pattern='^control_normal.*'), ) updated_image_path = get_new_image_name(image_path, func_name="normal2image") with open(updated_image_path, 'wb') as f: f.write(base64.b64decode(resp['images'][0])) return updated_image_path class BLIPVQA: def __init__(self, device): print("Initializing BLIP VQA to %s" % device) self.device = device self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") self.model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(self.device) def get_answer_from_question_and_image(self, inputs): image_path, question = inputs.split(",") raw_image = Image.open(image_path).convert('RGB') print(F'BLIPVQA :question :{question}') inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device) out = self.model.generate(**inputs) answer = self.processor.decode(out[0], skip_special_tokens=True) return answer class ConversationBot: def __init__(self): print("Initializing VisualChatGPT") # self.edit = ImageEditing(device=device) self.i2t = ImageCaptioning(device=device) self.t2i = T2I(device=device) self.image2canny = image2canny() self.canny2image = canny2image() self.image2line = image2line() self.line2image = line2image() self.image2hed = image2hed() self.hed2image = hed2image() self.image2scribble = image2scribble() self.scribble2image = scribble2image() self.image2pose = image2pose() self.pose2image = pose2image() self.BLIPVQA = BLIPVQA(device=device) self.image2seg = image2seg() self.seg2image = seg2image() self.image2depth = image2depth() self.depth2image = depth2image() self.image2normal = image2normal() self.normal2image = normal2image() # self.pix2pix = Pix2Pix(device="cuda:3") self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output') self.tools = [ Tool(name="Get Photo Description", func=self.i2t.inference, description="useful when you want to know what is inside the photo. receives image_path as input. " "The input to this tool should be a string, representing the image_path. "), Tool(name="Generate Image From User Input Text", func=self.t2i.inference, description="useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. " "The input to this tool should be a string, representing the text used to generate image. "), # Tool(name="Remove Something From The Photo", func=self.edit.remove_part_of_image, # description="useful when you want to remove and object or something from the photo from its description or location. " # "The input to this tool should be a comma seperated string of two, representing the image_path and the object need to be removed. "), # Tool(name="Replace Something From The Photo", func=self.edit.replace_part_of_image, # description="useful when you want to replace an object from the object description or location with another object from its description. " # "The input to this tool should be a comma seperated string of three, representing the image_path, the object to be replaced, the object to be replaced with "), # Tool(name="Instruct Image Using Text", func=self.pix2pix.inference, # description="useful when you want to the style of the image to be like the text. like: make it look like a painting. 
or make it like a robot. " # "The input to this tool should be a comma seperated string of two, representing the image_path and the text. "), Tool(name="Answer Question About The Image", func=self.BLIPVQA.get_answer_from_question_and_image, description="useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. " "The input to this tool should be a comma seperated string of two, representing the image_path and the question"), Tool(name="Edge Detection On Image", func=self.image2canny.inference, description="useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. " "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Canny Image", func=self.canny2image.inference, description="useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. " "The input to this tool should be a comma seperated string of two, representing the image_path and the user description. "), Tool(name="Line Detection On Image", func=self.image2line.inference, description="useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. " "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Line Image", func=self.line2image.inference, description="useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. " "The input to this tool should be a comma seperated string of two, representing the image_path and the user description. "), Tool(name="Hed Detection On Image", func=self.image2hed.inference, description="useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. " "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Soft Hed Boundary Image", func=self.hed2image.inference, description="useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. " "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"), Tool(name="Segmentation On Image", func=self.image2seg.inference, description="useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. 
" "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Segmentations", func=self.seg2image.inference, description="useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. " "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"), Tool(name="Predict Depth On Image", func=self.image2depth.inference, description="useful when you want to detect depth of the image. like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. " "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Depth", func=self.depth2image.inference, description="useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. " "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"), Tool(name="Predict Normal Map On Image", func=self.image2normal.inference, description="useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. " "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Normal Map", func=self.normal2image.inference, description="useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. " "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"), Tool(name="Sketch Detection On Image", func=self.image2scribble.inference, description="useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. " "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Sketch Image", func=self.scribble2image.inference, description="useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. " "The input to this tool should be a comma seperated string of two, representing the image_path and the user description"), Tool(name="Pose Detection On Image", func=self.image2pose.inference, description="useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. " "The input to this tool should be a string, representing the image_path"), Tool(name="Generate Image Condition On Pose Image", func=self.pose2image.inference, description="useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. 
" "The input to this tool should be a comma seperated string of two, representing the image_path and the user description")] def init_langchain(self, openai_api_key): self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key) self.agent = initialize_agent( self.tools, self.llm, agent="conversational-react-description", verbose=True, memory=self.memory, return_intermediate_steps=True, agent_kwargs={'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions': VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX} ) def run_text(self, openai_api_key, text, state): if not hasattr(self, "agent"): self.init_langchain(openai_api_key) print("===============Running run_text =============") print("Inputs:", text, state) print("======>Previous memory:\n %s" % self.agent.memory) self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500) res = self.agent({"input": text}) print("======>Current memory:\n %s" % self.agent.memory) response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output']) state = state + [(text, response)] print("Outputs:", state) return state, state def run_image(self, openai_api_key, image, state, txt): if not hasattr(self, "agent"): self.init_langchain(openai_api_key) print("===============Running run_image =============") print("Inputs:", image, state) print("======>Previous memory:\n %s" % self.agent.memory) image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") print("======>Auto Resize Image...") img = Image.open(image.name) width, height = img.size ratio = min(512 / width, 512 / height) width_new, height_new = (round(width * ratio), round(height * ratio)) img = img.resize((width_new, height_new)) img = img.convert('RGB') img.save(image_filename, "PNG") print(f"Resize image form {width}x{height} to {width_new}x{height_new}") description = self.i2t.inference(image_filename) Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \ "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description) AI_prompt = "Received. 
" self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt print("======>Current memory:\n %s" % self.agent.memory) state = state + [(f"![](/file={image_filename})*{image_filename}*", AI_prompt)] print("Outputs:", state) return state, state, txt + ' ' + image_filename + ' ' if __name__ == '__main__': os.makedirs("image/", exist_ok=True) bot = ConversationBot() with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo: openai_api_key = gr.Textbox(type="password", label="Enter your OpenAI API key here") chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT") state = gr.State([]) with gr.Row(): with gr.Column(scale=0.7): txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False) with gr.Column(scale=0.15, min_width=0): clear = gr.Button("Clear️") with gr.Column(scale=0.15, min_width=0): btn = gr.UploadButton("Upload", file_types=["image"]) txt.submit(bot.run_text, [openai_api_key, txt, state], [chatbot, state]) txt.submit(lambda: "", None, txt) btn.upload(bot.run_image, [openai_api_key, btn, state, txt], [chatbot, state, txt]) clear.click(bot.memory.clear) clear.click(lambda: [], None, chatbot) clear.click(lambda: [], None, state) demo.launch(server_name="0.0.0.0", server_port=7864)
[ "langchain.llms.openai.OpenAI", "langchain.chains.conversation.memory.ConversationBufferMemory", "langchain.agents.initialize.initialize_agent", "langchain.agents.tools.Tool" ]
[((3812, 3837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3835, 3837), False, 'import torch\n'), ((3891, 3907), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3901, 3907), False, 'import cv2\n'), ((3929, 3954), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img'], {}), "('.jpg', img)\n", (3941, 3954), False, 'import cv2\n'), ((4079, 4102), 'requests.get', 'requests.get', (['MODELLIST'], {}), '(MODELLIST)\n', (4091, 4102), False, 'import requests\n'), ((5000, 5032), 'requests.post', 'requests.post', (['url'], {'json': 'reqbody'}), '(url, json=reqbody)\n', (5013, 5032), False, 'import requests\n'), ((5695, 5722), 'os.path.split', 'os.path.split', (['org_img_name'], {}), '(org_img_name)\n', (5708, 5722), False, 'import os\n'), ((6361, 6394), 'os.path.join', 'os.path.join', (['head', 'new_file_name'], {}), '(head, new_file_name)\n', (6373, 6394), False, 'import os\n'), ((35612, 35648), 'os.makedirs', 'os.makedirs', (['"""image/"""'], {'exist_ok': '(True)'}), "('image/', exist_ok=True)\n", (35623, 35648), False, 'import os\n'), ((4174, 4197), 're.match', 're.match', (['pattern', 'item'], {}), '(pattern, item)\n', (4182, 4197), False, 'import re\n'), ((6500, 6562), 'transformers.CLIPSegProcessor.from_pretrained', 'CLIPSegProcessor.from_pretrained', (['"""CIDAS/clipseg-rd64-refined"""'], {}), "('CIDAS/clipseg-rd64-refined')\n", (6532, 6562), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((6807, 6829), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6817, 6829), False, 'from PIL import Image\n'), ((7306, 7323), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (7317, 7323), True, 'import numpy as np\n'), ((7345, 7376), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (7358, 7376), True, 'import numpy as np\n'), ((7624, 7652), 'PIL.Image.fromarray', 'Image.fromarray', (['visual_mask'], {}), '(visual_mask)\n', (7639, 7652), False, 'from PIL import Image\n'), ((10312, 10384), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (10341, 10384), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((10418, 10497), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (10454, 10497), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((10535, 10655), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'self.text_refine_model', 'tokenizer': 'self.text_refine_tokenizer', 'device': 'self.device'}), "('text-generation', model=self.text_refine_model, tokenizer=self.\n text_refine_tokenizer, device=self.device)\n", (10543, 10655), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((11412, 11482), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (11441, 11482), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((21185, 
21242), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {}), "('Salesforce/blip-vqa-base')\n", (21214, 21242), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((22789, 22861), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (22813, 22861), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((32769, 32821), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'openai_api_key': 'openai_api_key'}), '(temperature=0, openai_api_key=openai_api_key)\n', (32775, 32821), False, 'from langchain.llms.openai import OpenAI\n'), ((32843, 33146), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions':\n VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix':\n VISUAL_CHATGPT_PREFIX, 'format_instructions':\n VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX})\n", (32859, 33146), False, 'from langchain.agents.initialize import initialize_agent\n'), ((34415, 34437), 'PIL.Image.open', 'Image.open', (['image.name'], {}), '(image.name)\n', (34425, 34437), False, 'from PIL import Image\n'), ((35686, 35742), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot .overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (35695, 35742), True, 'import gradio as gr\n'), ((35777, 35844), 'gradio.Textbox', 'gr.Textbox', ([], {'type': '"""password"""', 'label': '"""Enter your OpenAI API key here"""'}), "(type='password', label='Enter your OpenAI API key here')\n", (35787, 35844), True, 'import gradio as gr\n'), ((35870, 35923), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""Visual ChatGPT"""'}), "(elem_id='chatbot', label='Visual ChatGPT')\n", (35880, 35923), True, 'import gradio as gr\n'), ((35940, 35952), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (35948, 35952), True, 'import gradio as gr\n'), ((3968, 3992), 'base64.b64encode', 'base64.b64encode', (['buffer'], {}), '(buffer)\n', (3984, 3992), False, 'import io, base64\n'), ((5842, 5854), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5852, 5854), False, 'import uuid\n'), ((7010, 7025), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7023, 7025), False, 'import torch\n'), ((22897, 23139), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Get Photo Description"""', 'func': 'self.i2t.inference', 'description': '"""useful when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. """'}), "(name='Get Photo Description', func=self.i2t.inference, description=\n 'useful when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. 
'\n )\n", (22901, 23139), False, 'from langchain.agents.tools import Tool\n'), ((23192, 23572), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image From User Input Text"""', 'func': 'self.t2i.inference', 'description': '"""useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. """'}), "(name='Generate Image From User Input Text', func=self.t2i.inference,\n description=\n 'useful when you want to generate an image from a user input text and save it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. '\n )\n", (23196, 23572), False, 'from langchain.agents.tools import Tool\n'), ((24857, 25269), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Answer Question About The Image"""', 'func': 'self.BLIPVQA.get_answer_from_question_and_image', 'description': '"""useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. The input to this tool should be a comma seperated string of two, representing the image_path and the question"""'}), "(name='Answer Question About The Image', func=self.BLIPVQA.\n get_answer_from_question_and_image, description=\n 'useful when you need an answer for a question based on an image. like: what is the background color of the last image, how many cats in this figure, what is in this figure. The input to this tool should be a comma seperated string of two, representing the image_path and the question'\n )\n", (24861, 25269), False, 'from langchain.agents.tools import Tool\n'), ((25317, 25688), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Edge Detection On Image"""', 'func': 'self.image2canny.inference', 'description': '"""useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Edge Detection On Image', func=self.image2canny.inference,\n description=\n 'useful when you want to detect the edge of the image. like: detect the edges of this image, or canny detection on image, or peform edge detection on this image, or detect the canny image of this image. The input to this tool should be a string, representing the image_path'\n )\n", (25321, 25688), False, 'from langchain.agents.tools import Tool\n'), ((25737, 26224), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Canny Image"""', 'func': 'self.canny2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. 
"""'}), "(name='Generate Image Condition On Canny Image', func=self.canny2image.\n inference, description=\n 'useful when you want to generate a new real image from both the user desciption and a canny image. like: generate a real image of a object or something from this canny image, or generate a new real image of a object or something from this edge image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. '\n )\n", (25741, 26224), False, 'from langchain.agents.tools import Tool\n'), ((26272, 26685), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Line Detection On Image"""', 'func': 'self.image2line.inference', 'description': '"""useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Line Detection On Image', func=self.image2line.inference,\n description=\n 'useful when you want to detect the straight line of the image. like: detect the straight lines of this image, or straight line detection on image, or peform straight line detection on this image, or detect the straight line image of this image. The input to this tool should be a string, representing the image_path'\n )\n", (26276, 26685), False, 'from langchain.agents.tools import Tool\n'), ((26734, 27239), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Line Image"""', 'func': 'self.line2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. """'}), "(name='Generate Image Condition On Line Image', func=self.line2image.\n inference, description=\n 'useful when you want to generate a new real image from both the user desciption and a straight line image. like: generate a real image of a object or something from this straight line image, or generate a new real image of a object or something from this straight lines. The input to this tool should be a comma seperated string of two, representing the image_path and the user description. '\n )\n", (26738, 27239), False, 'from langchain.agents.tools import Tool\n'), ((27287, 27703), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Hed Detection On Image"""', 'func': 'self.image2hed.inference', 'description': '"""useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Hed Detection On Image', func=self.image2hed.inference,\n description=\n 'useful when you want to detect the soft hed boundary of the image. like: detect the soft hed boundary of this image, or hed boundary detection on image, or peform hed boundary detection on this image, or detect soft hed boundary image of this image. 
The input to this tool should be a string, representing the image_path'\n )\n", (27291, 27703), False, 'from langchain.agents.tools import Tool\n'), ((27752, 28273), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Soft Hed Boundary Image"""', 'func': 'self.hed2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Soft Hed Boundary Image', func=self.\n hed2image.inference, description=\n 'useful when you want to generate a new real image from both the user desciption and a soft hed boundary image. like: generate a real image of a object or something from this soft hed boundary image, or generate a new real image of a object or something from this hed boundary. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n )\n", (27756, 28273), False, 'from langchain.agents.tools import Tool\n'), ((28321, 28650), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Segmentation On Image"""', 'func': 'self.image2seg.inference', 'description': '"""useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Segmentation On Image', func=self.image2seg.inference,\n description=\n 'useful when you want to detect segmentations of the image. like: segment this image, or generate segmentations on this image, or peform segmentation on this image. The input to this tool should be a string, representing the image_path'\n )\n", (28325, 28650), False, 'from langchain.agents.tools import Tool\n'), ((28699, 29195), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Segmentations"""', 'func': 'self.seg2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Segmentations', func=self.seg2image.\n inference, description=\n 'useful when you want to generate a new real image from both the user desciption and segmentations. like: generate a real image of a object or something from this segmentation image, or generate a new real image of a object or something from these segmentations. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n )\n", (28703, 29195), False, 'from langchain.agents.tools import Tool\n'), ((29243, 29580), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Predict Depth On Image"""', 'func': 'self.image2depth.inference', 'description': '"""useful when you want to detect depth of the image. 
like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Predict Depth On Image', func=self.image2depth.inference,\n description=\n 'useful when you want to detect depth of the image. like: generate the depth from this image, or detect the depth map on this image, or predict the depth for this image. The input to this tool should be a string, representing the image_path'\n )\n", (29247, 29580), False, 'from langchain.agents.tools import Tool\n'), ((29629, 30104), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Depth"""', 'func': 'self.depth2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Depth', func=self.depth2image.\n inference, description=\n 'useful when you want to generate a new real image from both the user desciption and depth image. like: generate a real image of a object or something from this depth image, or generate a new real image of a object or something from the depth map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n )\n", (29633, 30104), False, 'from langchain.agents.tools import Tool\n'), ((30153, 30461), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Predict Normal Map On Image"""', 'func': 'self.image2normal.inference', 'description': '"""useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Predict Normal Map On Image', func=self.image2normal.inference,\n description=\n 'useful when you want to detect norm map of the image. like: generate normal map from this image, or predict normal map of this image. The input to this tool should be a string, representing the image_path'\n )\n", (30157, 30461), False, 'from langchain.agents.tools import Tool\n'), ((30510, 30990), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Normal Map"""', 'func': 'self.normal2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Normal Map', func=self.normal2image.\n inference, description=\n 'useful when you want to generate a new real image from both the user desciption and normal map. like: generate a real image of a object or something from this normal map, or generate a new real image of a object or something from the normal map. 
The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n )\n", (30514, 30990), False, 'from langchain.agents.tools import Tool\n'), ((31038, 31384), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sketch Detection On Image"""', 'func': 'self.image2scribble.inference', 'description': '"""useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Sketch Detection On Image', func=self.image2scribble.inference,\n description=\n 'useful when you want to generate a scribble of the image. like: generate a scribble of this image, or generate a sketch from this image, detect the sketch from this image. The input to this tool should be a string, representing the image_path'\n )\n", (31042, 31384), False, 'from langchain.agents.tools import Tool\n'), ((31433, 31791), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Sketch Image"""', 'func': 'self.scribble2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Sketch Image', func=self.\n scribble2image.inference, description=\n 'useful when you want to generate a new real image from both the user desciption and a scribble image or a sketch image. The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n )\n", (31437, 31791), False, 'from langchain.agents.tools import Tool\n'), ((31839, 32151), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Pose Detection On Image"""', 'func': 'self.image2pose.inference', 'description': '"""useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_path"""'}), "(name='Pose Detection On Image', func=self.image2pose.inference,\n description=\n 'useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_path'\n )\n", (31843, 32151), False, 'from langchain.agents.tools import Tool\n'), ((32200, 32659), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image Condition On Pose Image"""', 'func': 'self.pose2image.inference', 'description': '"""useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. The input to this tool should be a comma seperated string of two, representing the image_path and the user description"""'}), "(name='Generate Image Condition On Pose Image', func=self.pose2image.\n inference, description=\n 'useful when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. 
The input to this tool should be a comma seperated string of two, representing the image_path and the user description'\n )\n", (32204, 32659), False, 'from langchain.agents.tools import Tool\n'), ((35966, 35974), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (35972, 35974), True, 'import gradio as gr\n'), ((6584, 6657), 'transformers.CLIPSegForImageSegmentation.from_pretrained', 'CLIPSegForImageSegmentation.from_pretrained', (['"""CIDAS/clipseg-rd64-refined"""'], {}), "('CIDAS/clipseg-rd64-refined')\n", (6627, 6657), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation\n'), ((7172, 7189), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (7183, 7189), True, 'import numpy as np\n'), ((11089, 11124), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (11105, 11124), False, 'import io, base64\n'), ((11504, 11594), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "(\n 'Salesforce/blip-image-captioning-base')\n", (11548, 11594), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((12911, 12946), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (12927, 12946), False, 'import io, base64\n'), ((13400, 13435), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (13416, 13435), False, 'import io, base64\n'), ((14063, 14098), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (14079, 14098), False, 'import io, base64\n'), ((14553, 14588), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (14569, 14588), False, 'import io, base64\n'), ((15122, 15157), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (15138, 15157), False, 'import io, base64\n'), ((15623, 15658), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (15639, 15658), False, 'import io, base64\n'), ((16297, 16332), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (16313, 16332), False, 'import io, base64\n'), ((16800, 16835), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (16816, 16835), False, 'import io, base64\n'), ((17467, 17502), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (17483, 17502), False, 'import io, base64\n'), ((17966, 18001), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (17982, 18001), False, 'import io, base64\n'), ((18629, 18664), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (18645, 18664), False, 'import io, base64\n'), ((19118, 19153), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (19134, 19153), False, 'import io, base64\n'), ((19786, 19821), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (19802, 19821), False, 'import io, base64\n'), ((20284, 20319), 'base64.b64decode', 'base64.b64decode', (["resp['images'][0]"], {}), "(resp['images'][0])\n", (20300, 20319), False, 'import io, base64\n'), ((20957, 20992), 'base64.b64decode', 'base64.b64decode', 
(["resp['images'][0]"], {}), "(resp['images'][0])\n", (20973, 20992), False, 'import io, base64\n'), ((21264, 21332), 'transformers.BlipForQuestionAnswering.from_pretrained', 'BlipForQuestionAnswering.from_pretrained', (['"""Salesforce/blip-vqa-base"""'], {}), "('Salesforce/blip-vqa-base')\n", (21304, 21332), False, 'from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering\n'), ((21477, 21499), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (21487, 21499), False, 'from PIL import Image\n'), ((35993, 36013), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (36002, 36013), True, 'import gradio as gr\n'), ((36168, 36202), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (36177, 36202), True, 'import gradio as gr\n'), ((36228, 36247), 'gradio.Button', 'gr.Button', (['"""Clear️"""'], {}), "('Clear️')\n", (36237, 36247), True, 'import gradio as gr\n'), ((36265, 36299), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (36274, 36299), True, 'import gradio as gr\n'), ((36323, 36370), 'gradio.UploadButton', 'gr.UploadButton', (['"""Upload"""'], {'file_types': "['image']"}), "('Upload', file_types=['image'])\n", (36338, 36370), True, 'import gradio as gr\n'), ((11676, 11698), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (11686, 11698), False, 'from PIL import Image\n'), ((10742, 10754), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10752, 10754), False, 'import uuid\n'), ((34327, 34339), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (34337, 34339), False, 'import uuid\n'), ((36037, 36132), 'gradio.Textbox', 'gr.Textbox', ([], {'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n 'Enter text and press enter, or upload an image')\n", (36047, 36132), True, 'import gradio as gr\n'), ((7085, 7110), 'torch.sigmoid', 'torch.sigmoid', (['outputs[0]'], {}), '(outputs[0])\n', (7098, 7110), False, 'import torch\n')]
from typing import Any, Callable, Dict, TypeVar from langchain import BasePromptTemplate, LLMChain from langchain.chat_models.base import BaseChatModel from langchain.schema import BaseOutputParser, OutputParserException from openai.error import ( AuthenticationError, InvalidRequestError, RateLimitError, ServiceUnavailableError, ) from reworkd_platform.schemas.agent import ModelSettings from reworkd_platform.web.api.errors import OpenAIError T = TypeVar("T") def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T: try: return parser.parse(completion) except OutputParserException as e: raise OpenAIError( e, "There was an issue parsing the response from the AI model." ) async def openai_error_handler( func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any ) -> Any: try: return await func(*args, **kwargs) except ServiceUnavailableError as e: raise OpenAIError( e, "OpenAI is experiencing issues. Visit " "https://status.openai.com/ for more info.", should_log=not settings.custom_api_key, ) except InvalidRequestError as e: if e.user_message.startswith("The model:"): raise OpenAIError( e, f"Your API key does not have access to your current model. Please use a different model.", should_log=not settings.custom_api_key, ) raise OpenAIError(e, e.user_message) except AuthenticationError as e: raise OpenAIError( e, "Authentication error: Ensure a valid API key is being used.", should_log=not settings.custom_api_key, ) except RateLimitError as e: if e.user_message.startswith("You exceeded your current quota"): raise OpenAIError( e, f"Your API key exceeded your current quota, please check your plan and billing details.", should_log=not settings.custom_api_key, ) raise OpenAIError(e, e.user_message) except Exception as e: raise OpenAIError( e, "There was an unexpected issue getting a response from the AI model." ) async def call_model_with_handling( model: BaseChatModel, prompt: BasePromptTemplate, args: Dict[str, str], settings: ModelSettings, **kwargs: Any, ) -> str: chain = LLMChain(llm=model, prompt=prompt) return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
[ "langchain.LLMChain" ]
[((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n 'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n 'Authentication error: Ensure a valid API key is being used.',\n should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n 'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n f'Your API key does not have access to your current model. Please use a different model.'\n , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n f'Your API key exceeded your current quota, please check your plan and billing details.'\n , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')]
import json import os.path import logging import time from langchain.vectorstores import FAISS from langchain import PromptTemplate from utils.references import References from utils.knowledge import Knowledge from utils.file_operations import make_archive, copy_templates from utils.tex_processing import create_copies from utils.gpt_interaction import GPTModel from utils.prompts import SYSTEM from utils.embeddings import EMBEDDINGS from utils.gpt_interaction import get_gpt_responses TOTAL_TOKENS = 0 TOTAL_PROMPTS_TOKENS = 0 TOTAL_COMPLETION_TOKENS = 0 def log_usage(usage, generating_target, print_out=True): global TOTAL_TOKENS global TOTAL_PROMPTS_TOKENS global TOTAL_COMPLETION_TOKENS prompts_tokens = usage['prompt_tokens'] completion_tokens = usage['completion_tokens'] total_tokens = usage['total_tokens'] TOTAL_TOKENS += total_tokens TOTAL_PROMPTS_TOKENS += prompts_tokens TOTAL_COMPLETION_TOKENS += completion_tokens message = f">>USAGE>> For generating {generating_target}, {total_tokens} tokens have been used " \ f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \ f"{TOTAL_TOKENS} tokens have been used in total." if print_out: print(message) logging.info(message) def _generation_setup(title, template="Default", tldr=False, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048, # generating references knowledge_database=None, max_tokens_kd=2048, query_counts=10): llm = GPTModel(model="gpt-3.5-turbo-16k") bibtex_path, destination_folder = copy_templates(template, title) logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log")) #generate key words keywords, usage = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True) log_usage(usage, "keywords") keywords = {keyword: max_kw_refs for keyword in keywords} print("Keywords: \n", keywords) #generate references ref = References(title, bib_refs) ref.collect_papers(keywords, tldr=tldr) references = ref.to_prompts(max_tokens=max_tokens_ref) all_paper_ids = ref.to_bibtex(bibtex_path) #product domain knowledge prompts = f"Title: {title}" preliminaries_kw, _ = llm(systems=SYSTEM["preliminaries"], prompts=prompts) # check if the database exists or not db_path = f"utils/knowledge_databases/{knowledge_database}" db_config_path = os.path.join(db_path, "db_meta.json") db_index_path = os.path.join(db_path, "faiss_index") if os.path.isdir(db_path): try: with open(db_config_path, "r", encoding="utf-8") as f: db_config = json.load(f) model_name = db_config["embedding_model"] embeddings = EMBEDDINGS[model_name] db = FAISS.load_local(db_index_path, embeddings) knowledge = Knowledge(db=db) knowledge.collect_knowledge(preliminaries_kw, max_query=query_counts) domain_knowledge = knowledge.to_prompts(max_tokens_kd) except Exception as e: domain_knowledge='' prompts = f"Title: {title}" syetem_promot = "You are an assistant designed to propose necessary components of an survey papers. Your response should follow the JSON format." components, usage = llm(systems=syetem_promot, prompts=prompts, return_json=True) log_usage(usage, "media") print(f"The paper information has been initialized. References are saved to {bibtex_path}.") paper = {} paper["title"] = title paper["references"] = references paper["bibtex"] = bibtex_path paper["components"] = components paper["domain_knowledge"] = domain_knowledge return paper, destination_folder, all_paper_ids def section_generation(paper, section, save_to_path, model, research_field="machine learning"): """ The main pipeline of generating a section. 1. 
Generate prompts. 2. Get responses from AI assistant. 3. Extract the section text. 4. Save the text to .tex file. :return usage """ title = paper["title"] references = paper["references"] components = paper['components'] instruction = '- Discuss three to five main related fields to this paper. For each field, select five to ten key publications from references. For each reference, analyze its strengths and weaknesses in one or two sentences. Present the related works in a logical manner, often chronologically. Consider using a taxonomy or categorization to structure the discussion. Do not use \section{...} or \subsection{...}; use \paragraph{...} to list related fields.' fundamental_subprompt = "Your task is to write the {section} section of the paper with the title '{title}'. This paper has the following content: {components}\n" instruction_subprompt = "\n" \ "Your response should follow the following instructions:\n" \ "{instruction}\n" ref_instruction_subprompt = "- Read references. " \ "Every time you use information from the references, you need to appropriately cite it (using \citep or \citet)." \ "For example of \citep, the sentence where you use information from lei2022adaptive \citep{{lei2022adaptive}}. " \ "For example of \citet, \citet{{lei2022adaptive}} claims some information.\n" \ "- Avoid citing the same reference in a same paragraph.\n" \ "\n" \ "References:\n" \ "{references}" output_subprompt = "Ensure that it can be directly compiled by LeTaX." reivew_prompts = PromptTemplate( input_variables=["title", "components", "instruction", "section", "references"], template=fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt + output_subprompt) prompts = reivew_prompts.format(title=title, components=components, instruction=instruction, section=section, references=references) SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"], template="You are an assistant designed to write academic papers in the field of {research_field} using LaTeX." ) output, usage = get_gpt_responses(SECTION_GENERATION_SYSTEM.format(research_field=research_field), prompts, model=model, temperature=0.4) output=output[25:] tex_file = os.path.join(save_to_path, f"{section}.tex") with open(tex_file, "w", encoding="utf-8") as f: f.write(output) use_md =True use_chinese = True if use_md: system_md = 'You are an translator between the LaTeX and .MD. here is a latex file where the content is: \n \n ' + output prompts_md = 'you should transfer the latex content to the .MD format seriously, and pay attention to the correctness of the citation format (use the number). you should directly output the new content without anyoter replay. you should add reference papers at the end of the paper, and add line breaks between two reference papers. The Title should be ' + paper['title'] output_md, usage_md = get_gpt_responses(system_md, prompts_md, model=model, temperature=0.4) md_file = os.path.join(save_to_path, f"{'survey'}.md") with open(md_file, "w", encoding="utf-8") as m: m.write(output_md) if use_chinese == True: system_md_chi = 'You are an translator between the english and chinese. here is a english file where the content is: \n \n ' + output prompts_md_chi = 'you should transfer the english to chinese and dont change anything others. you should directly output the new content without anyoter replay. you should keep the reference papers unchanged.' 
output_md_chi, usage_md_chi = get_gpt_responses(system_md_chi, prompts_md_chi, model=model, temperature=0.4) md_file_chi = os.path.join(save_to_path, f"{'survey_chinese'}.md") with open(md_file_chi, "w", encoding="utf-8") as c: c.write(output_md_chi) return usage def generate_draft(title, tldr=True, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048, knowledge_database=None, max_tokens_kd=2048, query_counts=10, section='related works', model="gpt-3.5-turbo-16k", template="Default" , save_zip=None): print("================START================") paper, destination_folder, _ = _generation_setup(title, template, tldr, max_kw_refs, bib_refs, max_tokens_ref=max_tokens_ref, max_tokens_kd=max_tokens_kd, query_counts=query_counts, knowledge_database=knowledge_database) # main components print(f"================PROCESSING================") usage = section_generation(paper, section, destination_folder, model=model) log_usage(usage, section) create_copies(destination_folder) print("\nPROCESSING COMPLETE\n") return make_archive(destination_folder, title+".zip") print("draft has been generated in " + destination_folder) if __name__ == "__main__": import openai openai.api_key = "your key" openai.api_base = 'https://api.openai.com/v1' #openai.proxy = "socks5h://localhost:7890 # if use the vpn target_title = "Reinforcement Learning for Robot Control" generate_draft(target_title, knowledge_database="ml_textbook_test",max_kw_refs=20)
[ "langchain.vectorstores.FAISS.load_local", "langchain.PromptTemplate" ]
[((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GPTModel\n'), ((1626, 1657), 'utils.file_operations.copy_templates', 'copy_templates', (['template', 'title'], {}), '(template, title)\n', (1640, 1657), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((2042, 2069), 'utils.references.References', 'References', (['title', 'bib_refs'], {}), '(title, bib_refs)\n', (2052, 2069), False, 'from utils.references import References\n'), ((5824, 6030), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['title', 'components', 'instruction', 'section', 'references']", 'template': '(fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt +\n output_subprompt)'}), "(input_variables=['title', 'components', 'instruction',\n 'section', 'references'], template=fundamental_subprompt +\n instruction_subprompt + ref_instruction_subprompt + output_subprompt)\n", (5838, 6030), False, 'from langchain import PromptTemplate\n'), ((6353, 6526), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['research_field']", 'template': '"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX."""'}), "(input_variables=['research_field'], template=\n 'You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.'\n )\n", (6367, 6526), False, 'from langchain import PromptTemplate\n'), ((9472, 9505), 'utils.tex_processing.create_copies', 'create_copies', (['destination_folder'], {}), '(destination_folder)\n', (9485, 9505), False, 'from utils.tex_processing import create_copies\n'), ((9554, 9602), 'utils.file_operations.make_archive', 'make_archive', (['destination_folder', "(title + '.zip')"], {}), "(destination_folder, title + '.zip')\n", (9566, 9602), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((7503, 7573), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md', 'prompts_md'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md, prompts_md, model=model, temperature=0.4)\n', (7520, 7573), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2856, 2899), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['db_index_path', 'embeddings'], {}), '(db_index_path, embeddings)\n', (2872, 2899), False, 'from langchain.vectorstores import FAISS\n'), ((2924, 2940), 'utils.knowledge.Knowledge', 'Knowledge', ([], {'db': 'db'}), '(db=db)\n', (2933, 2940), False, 'from utils.knowledge import Knowledge\n'), ((8211, 8289), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md_chi', 'prompts_md_chi'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md_chi, prompts_md_chi, model=model, temperature=0.4)\n', (8228, 8289), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2724, 2736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2733, 2736), False, 'import json\n')]
import json
import os.path
import logging
import time

from langchain.vectorstores import FAISS
from langchain import PromptTemplate

from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies
from utils.gpt_interaction import GPTModel
from utils.prompts import SYSTEM
from utils.embeddings import EMBEDDINGS
from utils.gpt_interaction import get_gpt_responses

TOTAL_TOKENS = 0
TOTAL_PROMPTS_TOKENS = 0
TOTAL_COMPLETION_TOKENS = 0


def log_usage(usage, generating_target, print_out=True):
    global TOTAL_TOKENS
    global TOTAL_PROMPTS_TOKENS
    global TOTAL_COMPLETION_TOKENS
    prompts_tokens = usage['prompt_tokens']
    completion_tokens = usage['completion_tokens']
    total_tokens = usage['total_tokens']
    TOTAL_TOKENS += total_tokens
    TOTAL_PROMPTS_TOKENS += prompts_tokens
    TOTAL_COMPLETION_TOKENS += completion_tokens
    message = f">>USAGE>> For generating {generating_target}, {total_tokens} tokens have been used " \
              f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \
              f"{TOTAL_TOKENS} tokens have been used in total."
    if print_out:
        print(message)
    logging.info(message)


def _generation_setup(title, template="Default", tldr=False, max_kw_refs=20, bib_refs=None,
                      max_tokens_ref=2048,  # generating references
                      knowledge_database=None, max_tokens_kd=2048, query_counts=10):
    llm = GPTModel(model="gpt-3.5-turbo-16k")
    bibtex_path, destination_folder = copy_templates(template, title)
    logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log"))

    # generate keywords
    keywords, usage = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True)
    log_usage(usage, "keywords")
    keywords = {keyword: max_kw_refs for keyword in keywords}
    print("Keywords: \n", keywords)

    # generate references
    ref = References(title, bib_refs)
    ref.collect_papers(keywords, tldr=tldr)
    references = ref.to_prompts(max_tokens=max_tokens_ref)
    all_paper_ids = ref.to_bibtex(bibtex_path)

    # produce domain knowledge
    prompts = f"Title: {title}"
    preliminaries_kw, _ = llm(systems=SYSTEM["preliminaries"], prompts=prompts)
    # check whether the knowledge database exists
    db_path = f"utils/knowledge_databases/{knowledge_database}"
    db_config_path = os.path.join(db_path, "db_meta.json")
    db_index_path = os.path.join(db_path, "faiss_index")
    domain_knowledge = ''  # default when no knowledge database is available
    if os.path.isdir(db_path):
        try:
            with open(db_config_path, "r", encoding="utf-8") as f:
                db_config = json.load(f)
            model_name = db_config["embedding_model"]
            embeddings = EMBEDDINGS[model_name]
            db = FAISS.load_local(db_index_path, embeddings)
            knowledge = Knowledge(db=db)
            knowledge.collect_knowledge(preliminaries_kw, max_query=query_counts)
            domain_knowledge = knowledge.to_prompts(max_tokens_kd)
        except Exception:
            domain_knowledge = ''

    prompts = f"Title: {title}"
    system_prompt = "You are an assistant designed to propose the necessary components of a survey paper. " \
                    "Your response should follow the JSON format."
    components, usage = llm(systems=system_prompt, prompts=prompts, return_json=True)
    log_usage(usage, "media")
    print(f"The paper information has been initialized. References are saved to {bibtex_path}.")

    paper = {}
    paper["title"] = title
    paper["references"] = references
    paper["bibtex"] = bibtex_path
    paper["components"] = components
    paper["domain_knowledge"] = domain_knowledge
    return paper, destination_folder, all_paper_ids


def section_generation(paper, section, save_to_path, model, research_field="machine learning"):
    """
    The main pipeline of generating a section.
        1. Generate prompts.
        2. Get responses from the AI assistant.
        3. Extract the section text.
        4. Save the text to a .tex file.
    :return usage
    """
    title = paper["title"]
    references = paper["references"]
    components = paper['components']
    instruction = '- Discuss three to five main related fields to this paper. For each field, select five to ten key publications from references. For each reference, analyze its strengths and weaknesses in one or two sentences. Present the related works in a logical manner, often chronologically. Consider using a taxonomy or categorization to structure the discussion. Do not use \section{...} or \subsection{...}; use \paragraph{...} to list related fields.'
    fundamental_subprompt = "Your task is to write the {section} section of the paper with the title '{title}'. This paper has the following content: {components}\n"
    instruction_subprompt = "\n" \
                            "Your response should follow the following instructions:\n" \
                            "{instruction}\n"
    ref_instruction_subprompt = "- Read references. " \
                                "Every time you use information from the references, you need to appropriately cite it (using \citep or \citet). " \
                                "For example of \citep, the sentence where you use information from lei2022adaptive \citep{{lei2022adaptive}}. " \
                                "For example of \citet, \citet{{lei2022adaptive}} claims some information.\n" \
                                "- Avoid citing the same reference in a same paragraph.\n" \
                                "\n" \
                                "References:\n" \
                                "{references}"
    output_subprompt = "Ensure that it can be directly compiled by LaTeX."

    review_prompts = PromptTemplate(
        input_variables=["title", "components", "instruction", "section", "references"],
        template=fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt + output_subprompt)
    prompts = review_prompts.format(title=title,
                                    components=components,
                                    instruction=instruction,
                                    section=section,
                                    references=references)

    SECTION_GENERATION_SYSTEM = PromptTemplate(
        input_variables=["research_field"],
        template="You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.")

    output, usage = get_gpt_responses(SECTION_GENERATION_SYSTEM.format(research_field=research_field),
                                      prompts, model=model, temperature=0.4)
    output = output[25:]
    tex_file = os.path.join(save_to_path, f"{section}.tex")
    with open(tex_file, "w", encoding="utf-8") as f:
        f.write(output)

    use_md = True
    use_chinese = True
    if use_md:
        system_md = 'You are a translator between LaTeX and Markdown. Here is a LaTeX file whose content is: \n \n ' + output
        prompts_md = 'You should carefully convert the LaTeX content to Markdown format and pay attention to the correctness of the citation format (use the number). You should directly output the new content without any other reply. You should add the reference papers at the end of the paper, and add line breaks between two reference papers. The title should be ' + paper['title']
        output_md, usage_md = get_gpt_responses(system_md, prompts_md, model=model, temperature=0.4)
        md_file = os.path.join(save_to_path, f"{'survey'}.md")
        with open(md_file, "w", encoding="utf-8") as m:
            m.write(output_md)

    if use_chinese:
        system_md_chi = 'You are a translator between English and Chinese. Here is an English file whose content is: \n \n ' + output
        prompts_md_chi = 'You should translate the English into Chinese without changing anything else. You should directly output the new content without any other reply. You should keep the reference papers unchanged.'
        output_md_chi, usage_md_chi = get_gpt_responses(system_md_chi, prompts_md_chi, model=model, temperature=0.4)
        md_file_chi = os.path.join(save_to_path, f"{'survey_chinese'}.md")
        with open(md_file_chi, "w", encoding="utf-8") as c:
            c.write(output_md_chi)
    return usage


def generate_draft(title, tldr=True, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048,
                   knowledge_database=None, max_tokens_kd=2048, query_counts=10,
                   section='related works', model="gpt-3.5-turbo-16k", template="Default",
                   save_zip=None):
    print("================START================")
    paper, destination_folder, _ = _generation_setup(title, template, tldr, max_kw_refs, bib_refs,
                                                      max_tokens_ref=max_tokens_ref,
                                                      max_tokens_kd=max_tokens_kd,
                                                      query_counts=query_counts,
                                                      knowledge_database=knowledge_database)

    # main components
    print(f"================PROCESSING================")
    usage = section_generation(paper, section, destination_folder, model=model)
    log_usage(usage, section)
    create_copies(destination_folder)
    print("\nPROCESSING COMPLETE\n")
    print("draft has been generated in " + destination_folder)
    return make_archive(destination_folder, title + ".zip")


if __name__ == "__main__":
    import openai

    openai.api_key = "your key"
    openai.api_base = 'https://api.openai.com/v1'
    # openai.proxy = "socks5h://localhost:7890"  # if you use a VPN
    target_title = "Reinforcement Learning for Robot Control"
    generate_draft(target_title, knowledge_database="ml_textbook_test", max_kw_refs=20)
[ "langchain.vectorstores.FAISS.load_local", "langchain.PromptTemplate" ]
[((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GPTModel\n'), ((1626, 1657), 'utils.file_operations.copy_templates', 'copy_templates', (['template', 'title'], {}), '(template, title)\n', (1640, 1657), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((2042, 2069), 'utils.references.References', 'References', (['title', 'bib_refs'], {}), '(title, bib_refs)\n', (2052, 2069), False, 'from utils.references import References\n'), ((5824, 6030), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['title', 'components', 'instruction', 'section', 'references']", 'template': '(fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt +\n output_subprompt)'}), "(input_variables=['title', 'components', 'instruction',\n 'section', 'references'], template=fundamental_subprompt +\n instruction_subprompt + ref_instruction_subprompt + output_subprompt)\n", (5838, 6030), False, 'from langchain import PromptTemplate\n'), ((6353, 6526), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['research_field']", 'template': '"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX."""'}), "(input_variables=['research_field'], template=\n 'You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.'\n )\n", (6367, 6526), False, 'from langchain import PromptTemplate\n'), ((9472, 9505), 'utils.tex_processing.create_copies', 'create_copies', (['destination_folder'], {}), '(destination_folder)\n', (9485, 9505), False, 'from utils.tex_processing import create_copies\n'), ((9554, 9602), 'utils.file_operations.make_archive', 'make_archive', (['destination_folder', "(title + '.zip')"], {}), "(destination_folder, title + '.zip')\n", (9566, 9602), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((7503, 7573), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md', 'prompts_md'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md, prompts_md, model=model, temperature=0.4)\n', (7520, 7573), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2856, 2899), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['db_index_path', 'embeddings'], {}), '(db_index_path, embeddings)\n', (2872, 2899), False, 'from langchain.vectorstores import FAISS\n'), ((2924, 2940), 'utils.knowledge.Knowledge', 'Knowledge', ([], {'db': 'db'}), '(db=db)\n', (2933, 2940), False, 'from utils.knowledge import Knowledge\n'), ((8211, 8289), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md_chi', 'prompts_md_chi'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md_chi, prompts_md_chi, model=model, temperature=0.4)\n', (8228, 8289), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2724, 2736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2733, 2736), False, 'import json\n')]
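The `apis` column of this sample lists the two langchain calls extracted from the code above (`langchain.vectorstores.FAISS.load_local` and `langchain.PromptTemplate`), and the `extract_api` column records their call sites. The sketch below is not part of the dataset; it is a minimal illustration, under assumed inputs, of how those two APIs are commonly used together. The index path, the `HuggingFaceEmbeddings` model name, the query, and the template text are hypothetical placeholders, and the embedding model must match whatever model the FAISS index was originally built with.

# Illustrative sketch (hypothetical paths/model) of the two extracted langchain APIs.
from langchain import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Load an on-disk FAISS index; the embeddings must be the same kind used to build it.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
db = FAISS.load_local("utils/knowledge_databases/example_db/faiss_index", embeddings)
docs = db.similarity_search("reinforcement learning for robot control", k=3)

# PromptTemplate substitutes named variables into a template string.
prompt = PromptTemplate(
    input_variables=["title", "knowledge"],
    template="Title: {title}\nRelated knowledge:\n{knowledge}\n",
)
print(prompt.format(title="Reinforcement Learning for Robot Control",
                    knowledge="\n".join(d.page_content for d in docs)))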
import sys import os sys.path.append(os.path.dirname(os.path.realpath(__file__))) sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq')) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio')) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'audio_detection')) sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mono2binaural')) import gradio as gr import matplotlib import librosa import torch from langchain.agents.initialize import initialize_agent from langchain.agents.tools import Tool from langchain.chains.conversation.memory import ConversationBufferMemory from langchain.llms.openai import OpenAI import re import uuid import soundfile from PIL import Image import numpy as np from omegaconf import OmegaConf from einops import repeat from ldm.util import instantiate_from_config from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000 from vocoder.bigvgan.models import VocoderBigVGAN from ldm.models.diffusion.ddim import DDIMSampler import whisper from utils.hparams import set_hparams from utils.hparams import hparams as hp import scipy.io.wavfile as wavfile import librosa from audio_infer.utils import config as detection_config from audio_infer.pytorch.models import PVT import clip import numpy as np AUDIO_CHATGPT_PREFIX = """AudioGPT AudioGPT can not directly read audios, but it has a list of tools to finish different speech, audio, and singing voice tasks. Each audio will have a file name formed as "audio/xxx.wav". When talking about audios, AudioGPT is very strict to the file name and will never fabricate nonexistent files. AudioGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the audio content and audio file name. It will remember to provide the file name from the last tool observation, if a new audio is generated. Human may provide new audios to AudioGPT with a description. The description helps AudioGPT to understand this audio, but AudioGPT should use tools to finish following tasks, rather than directly imagine from the description. Overall, AudioGPT is a powerful audio dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. TOOLS: ------ AudioGPT has access to the following tools:""" AUDIO_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format: ``` Thought: Do I need to use a tool? Yes Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ``` When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format: ``` Thought: Do I need to use a tool? No {ai_prefix}: [your response here] ``` """ AUDIO_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if not exists. You will remember to provide the audio file name loyally if it's provided in the last tool observation. Begin! Previous conversation history: {chat_history} New input: {input} Thought: Do I need to use a tool? 
{agent_scratchpad}""" def cut_dialogue_history(history_memory, keep_last_n_words = 500): tokens = history_memory.split() n_tokens = len(tokens) print(f"history_memory:{history_memory}, n_tokens: {n_tokens}") if n_tokens < keep_last_n_words: return history_memory else: paragraphs = history_memory.split('\n') last_n_tokens = n_tokens while last_n_tokens >= keep_last_n_words: last_n_tokens = last_n_tokens - len(paragraphs[0].split(' ')) paragraphs = paragraphs[1:] return '\n' + '\n'.join(paragraphs) def merge_audio(audio_path_1, audio_path_2): merged_signal = [] sr_1, signal_1 = wavfile.read(audio_path_1) sr_2, signal_2 = wavfile.read(audio_path_2) merged_signal.append(signal_1) merged_signal.append(signal_2) merged_signal = np.hstack(merged_signal) merged_signal = np.asarray(merged_signal, dtype=np.int16) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") wavfile.write(audio_filename, sr_2, merged_signal) return audio_filename class T2I: def __init__(self, device): from transformers import AutoModelForCausalLM, AutoTokenizer from diffusers import StableDiffusionPipeline from transformers import pipeline print("Initializing T2I to %s" % device) self.device = device self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion") self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion") self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device) self.pipe.to(device) def inference(self, text): image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"] print(f'{text} refined to {refined_text}') image = self.pipe(refined_text).images[0] image.save(image_filename) print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}") return image_filename class ImageCaptioning: def __init__(self, device): from transformers import BlipProcessor, BlipForConditionalGeneration print("Initializing ImageCaptioning to %s" % device) self.device = device self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device) def inference(self, image_path): inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device) out = self.model.generate(**inputs) captions = self.processor.decode(out[0], skip_special_tokens=True) return captions class T2A: def __init__(self, device): print("Initializing Make-An-Audio to %s" % device) self.device = device self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/text_to_audio/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta40multi_epoch=000085.ckpt', device=device) self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device) def _initialize_model(self, config, ckpt, device): config = OmegaConf.load(config) model = instantiate_from_config(config.model) model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False) model = model.to(device) model.cond_stage_model.to(model.device) model.cond_stage_model.device = model.device sampler = DDIMSampler(model) return sampler def txt2audio(self, text, seed = 55, scale = 1.5, ddim_steps = 100, 
n_samples = 3, W = 624, H = 80): SAMPLE_RATE = 16000 prng = np.random.RandomState(seed) start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8) start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32) uc = self.sampler.model.get_learned_conditioning(n_samples * [""]) c = self.sampler.model.get_learned_conditioning(n_samples * [text]) shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x) samples_ddim, _ = self.sampler.sample(S = ddim_steps, conditioning = c, batch_size = n_samples, shape = shape, verbose = False, unconditional_guidance_scale = scale, unconditional_conditioning = uc, x_T = start_code) x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1] wav_list = [] for idx,spec in enumerate(x_samples_ddim): wav = self.vocoder.vocode(spec) wav_list.append((SAMPLE_RATE,wav)) best_wav = self.select_best_audio(text, wav_list) return best_wav def select_best_audio(self, prompt, wav_list): from wav_evaluation.models.CLAPWrapper import CLAPWrapper clap_model = CLAPWrapper('text_to_audio/Make_An_Audio/useful_ckpts/CLAP/CLAP_weights_2022.pth', 'text_to_audio/Make_An_Audio/useful_ckpts/CLAP/config.yml', use_cuda=torch.cuda.is_available()) text_embeddings = clap_model.get_text_embeddings([prompt]) score_list = [] for data in wav_list: sr, wav = data audio_embeddings = clap_model.get_audio_embeddings([(torch.FloatTensor(wav), sr)], resample=True) score = clap_model.compute_similarity(audio_embeddings, text_embeddings, use_logit_scale=False).squeeze().cpu().numpy() score_list.append(score) max_index = np.array(score_list).argmax() print(score_list, max_index) return wav_list[max_index] def inference(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80): melbins,mel_len = 80,624 with torch.no_grad(): result = self.txt2audio( text = text, H = melbins, W = mel_len ) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, result[1], samplerate = 16000) print(f"Processed T2I.run, text: {text}, audio_filename: {audio_filename}") return audio_filename class I2A: def __init__(self, device): print("Initializing Make-An-Audio-Image to %s" % device) self.device = device self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/img_to_audio/img2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta54_epoch=000216.ckpt', device=device) self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device) def _initialize_model(self, config, ckpt, device): config = OmegaConf.load(config) model = instantiate_from_config(config.model) model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False) model = model.to(device) model.cond_stage_model.to(model.device) model.cond_stage_model.device = model.device sampler = DDIMSampler(model) return sampler def img2audio(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80): SAMPLE_RATE = 16000 n_samples = 1 # only support 1 sample prng = np.random.RandomState(seed) start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8) start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32) uc = self.sampler.model.get_learned_conditioning(n_samples * [""]) #image = Image.fromarray(image) image = Image.open(image) image = 
self.sampler.model.cond_stage_model.preprocess(image).unsqueeze(0) image_embedding = self.sampler.model.cond_stage_model.forward_img(image) c = image_embedding.repeat(n_samples, 1, 1) shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x) samples_ddim, _ = self.sampler.sample(S=ddim_steps, conditioning=c, batch_size=n_samples, shape=shape, verbose=False, unconditional_guidance_scale=scale, unconditional_conditioning=uc, x_T=start_code) x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1] wav_list = [] for idx,spec in enumerate(x_samples_ddim): wav = self.vocoder.vocode(spec) wav_list.append((SAMPLE_RATE,wav)) best_wav = wav_list[0] return best_wav def inference(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80): melbins,mel_len = 80,624 with torch.no_grad(): result = self.img2audio( image=image, H=melbins, W=mel_len ) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, result[1], samplerate = 16000) print(f"Processed I2a.run, image_filename: {image}, audio_filename: {audio_filename}") return audio_filename class TTS: def __init__(self, device=None): from inference.tts.PortaSpeech import TTSInference if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing PortaSpeech to %s" % device) self.device = device self.exp_name = 'checkpoints/ps_adv_baseline' self.set_model_hparams() self.inferencer = TTSInference(self.hp, device) def set_model_hparams(self): set_hparams(exp_name=self.exp_name, print_hparams=False) self.hp = hp def inference(self, text): self.set_model_hparams() inp = {"text": text} out = self.inferencer.infer_once(inp) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, out, samplerate=22050) return audio_filename class T2S: def __init__(self, device= None): from inference.svs.ds_e2e import DiffSingerE2EInfer if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing DiffSinger to %s" % device) self.device = device self.exp_name = 'checkpoints/0831_opencpop_ds1000' self.config= 'NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml' self.set_model_hparams() self.pipe = DiffSingerE2EInfer(self.hp, device) self.default_inp = { 'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP', 'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest', 'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590' } def set_model_hparams(self): set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False) self.hp = hp def inference(self, inputs): self.set_model_hparams() val = inputs.split(",") key = ['text', 'notes', 'notes_duration'] try: inp = {k: v for k, v in zip(key, val)} wav = self.pipe.infer_once(inp) except: print('Error occurs. 
Generate default audio sample.\n') inp = self.default_inp wav = self.pipe.infer_once(inp) #if inputs == '' or len(val) < len(key): # inp = self.default_inp #else: # inp = {k:v for k,v in zip(key,val)} #wav = self.pipe.infer_once(inp) wav *= 32767 audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16)) print(f"Processed T2S.run, audio_filename: {audio_filename}") return audio_filename class t2s_VISinger: def __init__(self, device=None): from espnet2.bin.svs_inference import SingingGenerate if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing VISingere to %s" % device) tag = 'AQuarterMile/opencpop_visinger1' self.model = SingingGenerate.from_pretrained( model_tag=str_or_none(tag), device=device, ) phn_dur = [[0. , 0.219 ], [0.219 , 0.50599998], [0.50599998, 0.71399999], [0.71399999, 1.097 ], [1.097 , 1.28799999], [1.28799999, 1.98300004], [1.98300004, 7.10500002], [7.10500002, 7.60400009]] phn = ['sh', 'i', 'q', 'v', 'n', 'i', 'SP', 'AP'] score = [[0, 0.50625, 'sh_i', 58, 'sh_i'], [0.50625, 1.09728, 'q_v', 56, 'q_v'], [1.09728, 1.9832100000000001, 'n_i', 53, 'n_i'], [1.9832100000000001, 7.105360000000001, 'SP', 0, 'SP'], [7.105360000000001, 7.604390000000001, 'AP', 0, 'AP']] tempo = 70 tmp = {} tmp["label"] = phn_dur, phn tmp["score"] = tempo, score self.default_inp = tmp def inference(self, inputs): val = inputs.split(",") key = ['text', 'notes', 'notes_duration'] try: # TODO: input will be update inp = {k: v for k, v in zip(key, val)} wav = self.model(text=inp)["wav"] except: print('Error occurs. Generate default audio sample.\n') inp = self.default_inp wav = self.model(text=inp)["wav"] audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, wav, samplerate=self.model.fs) return audio_filename class TTS_OOD: def __init__(self, device): from inference.tts.GenerSpeech import GenerSpeechInfer if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' print("Initializing GenerSpeech to %s" % device) self.device = device self.exp_name = 'checkpoints/GenerSpeech' self.config = 'NeuralSeq/modules/GenerSpeech/config/generspeech.yaml' self.set_model_hparams() self.pipe = GenerSpeechInfer(self.hp, device) def set_model_hparams(self): set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False) f0_stats_fn = f'{hp["binary_data_dir"]}/train_f0s_mean_std.npy' if os.path.exists(f0_stats_fn): hp['f0_mean'], hp['f0_std'] = np.load(f0_stats_fn) hp['f0_mean'] = float(hp['f0_mean']) hp['f0_std'] = float(hp['f0_std']) hp['emotion_encoder_path'] = 'checkpoints/Emotion_encoder.pt' self.hp = hp def inference(self, inputs): self.set_model_hparams() key = ['ref_audio', 'text'] val = inputs.split(",") inp = {k: v for k, v in zip(key, val)} wav = self.pipe.infer_once(inp) wav *= 32767 audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16)) print( f"Processed GenerSpeech.run. Input text:{val[1]}. Input reference audio: {val[0]}. 
Output Audio_filename: {audio_filename}") return audio_filename class Inpaint: def __init__(self, device): print("Initializing Make-An-Audio-inpaint to %s" % device) self.device = device self.sampler = self._initialize_model_inpaint('text_to_audio/Make_An_Audio/configs/inpaint/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/inpaint7_epoch00047.ckpt') self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device) self.cmap_transform = matplotlib.cm.viridis def _initialize_model_inpaint(self, config, ckpt): config = OmegaConf.load(config) model = instantiate_from_config(config.model) model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model = model.to(device) print(model.device, device, model.cond_stage_model.device) sampler = DDIMSampler(model) return sampler def make_batch_sd(self, mel, mask, num_samples=1): mel = torch.from_numpy(mel)[None,None,...].to(dtype=torch.float32) mask = torch.from_numpy(mask)[None,None,...].to(dtype=torch.float32) masked_mel = (1 - mask) * mel mel = mel * 2 - 1 mask = mask * 2 - 1 masked_mel = masked_mel * 2 -1 batch = { "mel": repeat(mel.to(device=self.device), "1 ... -> n ...", n=num_samples), "mask": repeat(mask.to(device=self.device), "1 ... -> n ...", n=num_samples), "masked_mel": repeat(masked_mel.to(device=self.device), "1 ... -> n ...", n=num_samples), } return batch def gen_mel(self, input_audio_path): SAMPLE_RATE = 16000 sr, ori_wav = wavfile.read(input_audio_path) print("gen_mel") print(sr,ori_wav.shape,ori_wav) ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0 if len(ori_wav.shape)==2:# stereo ori_wav = librosa.to_mono(ori_wav.T) print(sr,ori_wav.shape,ori_wav) ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE) mel_len,hop_size = 848,256 input_len = mel_len * hop_size if len(ori_wav) < input_len: input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0) else: input_wav = ori_wav[:input_len] mel = TRANSFORMS_16000(input_wav) return mel def gen_mel_audio(self, input_audio): SAMPLE_RATE = 16000 sr,ori_wav = input_audio print("gen_mel_audio") print(sr,ori_wav.shape,ori_wav) ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0 if len(ori_wav.shape)==2:# stereo ori_wav = librosa.to_mono(ori_wav.T) print(sr,ori_wav.shape,ori_wav) ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE) mel_len,hop_size = 848,256 input_len = mel_len * hop_size if len(ori_wav) < input_len: input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0) else: input_wav = ori_wav[:input_len] mel = TRANSFORMS_16000(input_wav) return mel def show_mel_fn(self, input_audio_path): crop_len = 500 crop_mel = self.gen_mel(input_audio_path)[:,:crop_len] color_mel = self.cmap_transform(crop_mel) image = Image.fromarray((color_mel*255).astype(np.uint8)) image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") image.save(image_filename) return image_filename def inpaint(self, batch, seed, ddim_steps, num_samples=1, W=512, H=512): model = self.sampler.model prng = np.random.RandomState(seed) start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8) start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32) c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"])) cc = torch.nn.functional.interpolate(batch["mask"], size=c.shape[-2:]) c = torch.cat((c, cc), dim=1) # 
(b,c+1,h,w) 1 is mask shape = (c.shape[1]-1,)+c.shape[2:] samples_ddim, _ = self.sampler.sample(S=ddim_steps, conditioning=c, batch_size=c.shape[0], shape=shape, verbose=False) x_samples_ddim = model.decode_first_stage(samples_ddim) mel = torch.clamp((batch["mel"]+1.0)/2.0,min=0.0, max=1.0) mask = torch.clamp((batch["mask"]+1.0)/2.0,min=0.0, max=1.0) predicted_mel = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0) inpainted = (1-mask)*mel+mask*predicted_mel inpainted = inpainted.cpu().numpy().squeeze() inapint_wav = self.vocoder.vocode(inpainted) return inpainted, inapint_wav def inference(self, input_audio, mel_and_mask, seed = 55, ddim_steps = 100): SAMPLE_RATE = 16000 torch.set_grad_enabled(False) mel_img = Image.open(mel_and_mask['image']) mask_img = Image.open(mel_and_mask["mask"]) show_mel = np.array(mel_img.convert("L"))/255 mask = np.array(mask_img.convert("L"))/255 mel_bins,mel_len = 80,848 input_mel = self.gen_mel_audio(input_audio)[:,:mel_len] mask = np.pad(mask,((0,0),(0,mel_len-mask.shape[1])),mode='constant',constant_values=0) print(mask.shape,input_mel.shape) with torch.no_grad(): batch = self.make_batch_sd(input_mel,mask,num_samples=1) inpainted,gen_wav = self.inpaint( batch=batch, seed=seed, ddim_steps=ddim_steps, num_samples=1, H=mel_bins, W=mel_len ) inpainted = inpainted[:,:show_mel.shape[1]] color_mel = self.cmap_transform(inpainted) input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0]) gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len] image = Image.fromarray((color_mel*255).astype(np.uint8)) image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") image.save(image_filename) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename, gen_wav, samplerate = 16000) return image_filename, audio_filename class ASR: def __init__(self, device): print("Initializing Whisper to %s" % device) self.device = device self.model = whisper.load_model("base", device=device) def inference(self, audio_path): audio = whisper.load_audio(audio_path) audio = whisper.pad_or_trim(audio) mel = whisper.log_mel_spectrogram(audio).to(self.device) _, probs = self.model.detect_language(mel) options = whisper.DecodingOptions() result = whisper.decode(self.model, mel, options) return result.text def translate_english(self, audio_path): audio = self.model.transcribe(audio_path, language='English') return audio['text'] class A2T: def __init__(self, device): from audio_to_text.inference_waveform import AudioCapModel print("Initializing Audio-To-Text Model to %s" % device) self.device = device self.model = AudioCapModel("audio_to_text/audiocaps_cntrstv_cnn14rnn_trm") def inference(self, audio_path): audio = whisper.load_audio(audio_path) caption_text = self.model(audio) return caption_text[0] class GeneFace: def __init__(self, device=None): print("Initializing GeneFace model to %s" % device) from audio_to_face.GeneFace_binding import GeneFaceInfer if device is None: device = 'cuda' if torch.cuda.is_available() else 'cpu' self.device = device self.geneface_model = GeneFaceInfer(device) print("Loaded GeneFace model") def inference(self, audio_path): audio_base_name = os.path.basename(audio_path)[:-4] out_video_name = audio_path.replace("audio","video").replace(".wav", ".mp4") inp = { 'audio_source_name': audio_path, 'out_npy_name': f'geneface/tmp/{audio_base_name}.npy', 'cond_name': f'geneface/tmp/{audio_base_name}.npy', 'out_video_name': out_video_name, 'tmp_imgs_dir': f'video/tmp_imgs', } 
self.geneface_model.infer_once(inp) return out_video_name class SoundDetection: def __init__(self, device): self.device = device self.sample_rate = 32000 self.window_size = 1024 self.hop_size = 320 self.mel_bins = 64 self.fmin = 50 self.fmax = 14000 self.model_type = 'PVT' self.checkpoint_path = 'audio_detection/audio_infer/useful_ckpts/audio_detection.pth' self.classes_num = detection_config.classes_num self.labels = detection_config.labels self.frames_per_second = self.sample_rate // self.hop_size # Model = eval(self.model_type) self.model = PVT(sample_rate=self.sample_rate, window_size=self.window_size, hop_size=self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax, classes_num=self.classes_num) checkpoint = torch.load(self.checkpoint_path, map_location=self.device) self.model.load_state_dict(checkpoint['model']) self.model.to(device) def inference(self, audio_path): # Forward (waveform, _) = librosa.core.load(audio_path, sr=self.sample_rate, mono=True) waveform = waveform[None, :] # (1, audio_length) waveform = torch.from_numpy(waveform) waveform = waveform.to(self.device) # Forward with torch.no_grad(): self.model.eval() batch_output_dict = self.model(waveform, None) framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0] """(time_steps, classes_num)""" # print('Sound event detection result (time_steps x classes_num): {}'.format( # framewise_output.shape)) import numpy as np import matplotlib.pyplot as plt sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1] top_k = 10 # Show top results top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]] """(time_steps, top_k)""" # Plot result stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=self.window_size, hop_length=self.hop_size, window='hann', center=True) frames_num = stft.shape[-1] fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4)) axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet') axs[0].set_ylabel('Frequency bins') axs[0].set_title('Log spectrogram') axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1) axs[1].xaxis.set_ticks(np.arange(0, frames_num, self.frames_per_second)) axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / self.frames_per_second)) axs[1].yaxis.set_ticks(np.arange(0, top_k)) axs[1].yaxis.set_ticklabels(np.array(self.labels)[sorted_indexes[0 : top_k]]) axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3) axs[1].set_xlabel('Seconds') axs[1].xaxis.set_ticks_position('bottom') plt.tight_layout() image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png") plt.savefig(image_filename) return image_filename class SoundExtraction: def __init__(self, device): from sound_extraction.model.LASSNet import LASSNet from sound_extraction.utils.stft import STFT import torch.nn as nn self.device = device self.model_file = 'sound_extraction/useful_ckpts/LASSNet.pt' self.stft = STFT() self.model = nn.DataParallel(LASSNet(device)).to(device) checkpoint = torch.load(self.model_file) self.model.load_state_dict(checkpoint['model']) self.model.eval() def inference(self, inputs): #key = ['ref_audio', 'text'] from sound_extraction.utils.wav_io import load_wav, save_wav val = inputs.split(",") audio_path = val[0] # audio_path, text text = val[1] waveform = load_wav(audio_path) waveform = torch.tensor(waveform).transpose(1,0) mixed_mag, mixed_phase = self.stft.transform(waveform) text_query = ['[CLS] ' + text] mixed_mag = 
mixed_mag.transpose(2,1).unsqueeze(0).to(self.device) est_mask = self.model(mixed_mag, text_query) est_mag = est_mask * mixed_mag est_mag = est_mag.squeeze(1) est_mag = est_mag.permute(0, 2, 1) est_wav = self.stft.inverse(est_mag.cpu().detach(), mixed_phase) est_wav = est_wav.squeeze(0).squeeze(0).numpy() #est_path = f'output/est{i}.wav' audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") print('audio_filename ', audio_filename) save_wav(est_wav, audio_filename) return audio_filename class Binaural: def __init__(self, device): from src.models import BinauralNetwork self.device = device self.model_file = 'mono2binaural/useful_ckpts/m2b/binaural_network.net' self.position_file = ['mono2binaural/useful_ckpts/m2b/tx_positions.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions2.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions3.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions4.txt', 'mono2binaural/useful_ckpts/m2b/tx_positions5.txt'] self.net = BinauralNetwork(view_dim=7, warpnet_layers=4, warpnet_channels=64, ) self.net.load_from_file(self.model_file) self.sr = 48000 def inference(self, audio_path): mono, sr = librosa.load(path=audio_path, sr=self.sr, mono=True) mono = torch.from_numpy(mono) mono = mono.unsqueeze(0) import numpy as np import random rand_int = random.randint(0,4) view = np.loadtxt(self.position_file[rand_int]).transpose().astype(np.float32) view = torch.from_numpy(view) if not view.shape[-1] * 400 == mono.shape[-1]: mono = mono[:,:(mono.shape[-1]//400)*400] # if view.shape[1]*400 > mono.shape[1]: m_a = view.shape[1] - mono.shape[-1]//400 rand_st = random.randint(0,m_a) view = view[:,m_a:m_a+(mono.shape[-1]//400)] # # binauralize and save output self.net.eval().to(self.device) mono, view = mono.to(self.device), view.to(self.device) chunk_size = 48000 # forward in chunks of 1s rec_field = 1000 # add 1000 samples as "safe bet" since warping has undefined rec. 
field rec_field -= rec_field % 400 # make sure rec_field is a multiple of 400 to match audio and view frequencies chunks = [ { "mono": mono[:, max(0, i-rec_field):i+chunk_size], "view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400] } for i in range(0, mono.shape[-1], chunk_size) ] for i, chunk in enumerate(chunks): with torch.no_grad(): mono = chunk["mono"].unsqueeze(0) view = chunk["view"].unsqueeze(0) binaural = self.net(mono, view).squeeze(0) if i > 0: binaural = binaural[:, -(mono.shape[-1]-rec_field):] chunk["binaural"] = binaural binaural = torch.cat([chunk["binaural"] for chunk in chunks], dim=-1) binaural = torch.clamp(binaural, min=-1, max=1).cpu() #binaural = chunked_forwarding(net, mono, view) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") import torchaudio torchaudio.save(audio_filename, binaural, sr) #soundfile.write(audio_filename, binaural, samplerate = 48000) print(f"Processed Binaural.run, audio_filename: {audio_filename}") return audio_filename class TargetSoundDetection: def __init__(self, device): from target_sound_detection.src import models as tsd_models from target_sound_detection.src.models import event_labels self.device = device self.MEL_ARGS = { 'n_mels': 64, 'n_fft': 2048, 'hop_length': int(22050 * 20 / 1000), 'win_length': int(22050 * 40 / 1000) } self.EPS = np.spacing(1) self.clip_model, _ = clip.load("ViT-B/32", device=self.device) self.event_labels = event_labels self.id_to_event = {i : label for i, label in enumerate(self.event_labels)} config = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth', map_location='cpu') config_parameters = dict(config) config_parameters['tao'] = 0.6 if 'thres' not in config_parameters.keys(): config_parameters['thres'] = 0.5 if 'time_resolution' not in config_parameters.keys(): config_parameters['time_resolution'] = 125 model_parameters = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt' , map_location=lambda storage, loc: storage) # load parameter self.model = getattr(tsd_models, config_parameters['model'])(config_parameters, inputdim=64, outputdim=2, time_resolution=config_parameters['time_resolution'], **config_parameters['model_args']) self.model.load_state_dict(model_parameters) self.model = self.model.to(self.device).eval() self.re_embeds = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth') self.ref_mel = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth') def extract_feature(self, fname): import soundfile as sf y, sr = sf.read(fname, dtype='float32') print('y ', y.shape) ti = y.shape[0]/sr if y.ndim > 1: y = y.mean(1) y = librosa.resample(y, sr, 22050) lms_feature = np.log(librosa.feature.melspectrogram(y, **self.MEL_ARGS) + self.EPS).T return lms_feature,ti def build_clip(self, text): text = clip.tokenize(text).to(self.device) # ["a diagram with dog", "a dog", "a cat"] text_features = self.clip_model.encode_text(text) return text_features def cal_similarity(self, target, retrievals): ans = [] #target =torch.from_numpy(target) for name in retrievals.keys(): tmp = retrievals[name] #tmp = torch.from_numpy(tmp) s = torch.cosine_similarity(target.squeeze(), tmp.squeeze(), dim=0) ans.append(s.item()) return ans.index(max(ans)) def inference(self, text, audio_path): from target_sound_detection.src.utils import median_filter, decode_with_timestamps target_emb = self.build_clip(text) # torch type idx = self.cal_similarity(target_emb, self.re_embeds) 
target_event = self.id_to_event[idx] embedding = self.ref_mel[target_event] embedding = torch.from_numpy(embedding) embedding = embedding.unsqueeze(0).to(self.device).float() #print('embedding ', embedding.shape) inputs,ti = self.extract_feature(audio_path) #print('ti ', ti) inputs = torch.from_numpy(inputs) inputs = inputs.unsqueeze(0).to(self.device).float() #print('inputs ', inputs.shape) decision, decision_up, logit = self.model(inputs, embedding) pred = decision_up.detach().cpu().numpy() pred = pred[:,:,0] frame_num = decision_up.shape[1] time_ratio = ti / frame_num filtered_pred = median_filter(pred, window_size=1, threshold=0.5) #print('filtered_pred ', filtered_pred) time_predictions = [] for index_k in range(filtered_pred.shape[0]): decoded_pred = [] decoded_pred_ = decode_with_timestamps(target_event, filtered_pred[index_k,:]) if len(decoded_pred_) == 0: # neg deal decoded_pred_.append((target_event, 0, 0)) decoded_pred.append(decoded_pred_) for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1 cur_pred = pred[num_batch] # Save each frame output, for later visualization label_prediction = decoded_pred[num_batch] # frame predict # print(label_prediction) for event_label, onset, offset in label_prediction: time_predictions.append({ 'onset': onset*time_ratio, 'offset': offset*time_ratio,}) ans = '' for i,item in enumerate(time_predictions): ans = ans + 'segment' + str(i+1) + ' start_time: ' + str(item['onset']) + ' end_time: ' + str(item['offset']) + '\t' #print(ans) return ans # class Speech_Enh_SS_SC: # """Speech Enhancement or Separation in single-channel # Example usage: # enh_model = Speech_Enh_SS("cuda") # enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav") # """ # def __init__(self, device="cuda", model_name="lichenda/chime4_fasnet_dprnn_tac"): # self.model_name = model_name # self.device = device # print("Initializing ESPnet Enh to %s" % device) # self._initialize_model() # def _initialize_model(self): # from espnet_model_zoo.downloader import ModelDownloader # from espnet2.bin.enh_inference import SeparateSpeech # d = ModelDownloader() # cfg = d.download_and_unpack(self.model_name) # self.separate_speech = SeparateSpeech( # train_config=cfg["train_config"], # model_file=cfg["model_file"], # # for segment-wise process on long speech # segment_size=2.4, # hop_size=0.8, # normalize_segment_scale=False, # show_progressbar=True, # ref_channel=None, # normalize_output_wav=True, # device=self.device, # ) # def inference(self, speech_path, ref_channel=0): # speech, sr = soundfile.read(speech_path) # speech = speech[:, ref_channel] # assert speech.dim() == 1 # enh_speech = self.separate_speech(speech[None, ], fs=sr) # if len(enh_speech) == 1: # return enh_speech[0] # return enh_speech # class Speech_Enh_SS_MC: # """Speech Enhancement or Separation in multi-channel""" # def __init__(self, device="cuda", model_name=None, ref_channel=4): # self.model_name = model_name # self.ref_channel = ref_channel # self.device = device # print("Initializing ESPnet Enh to %s" % device) # self._initialize_model() # def _initialize_model(self): # from espnet_model_zoo.downloader import ModelDownloader # from espnet2.bin.enh_inference import SeparateSpeech # d = ModelDownloader() # cfg = d.download_and_unpack(self.model_name) # self.separate_speech = SeparateSpeech( # train_config=cfg["train_config"], # model_file=cfg["model_file"], # # for segment-wise process on long speech # segment_size=2.4, # hop_size=0.8, # 
normalize_segment_scale=False, # show_progressbar=True, # ref_channel=self.ref_channel, # normalize_output_wav=True, # device=self.device, # ) # def inference(self, speech_path): # speech, sr = soundfile.read(speech_path) # speech = speech.T # enh_speech = self.separate_speech(speech[None, ...], fs=sr) # if len(enh_speech) == 1: # return enh_speech[0] # return enh_speech class Speech_Enh_SS_SC: """Speech Enhancement or Separation in single-channel Example usage: enh_model = Speech_Enh_SS("cuda") enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav") """ def __init__(self, device="cuda", model_name="espnet/Wangyou_Zhang_chime4_enh_train_enh_conv_tasnet_raw"): self.model_name = model_name self.device = device print("Initializing ESPnet Enh to %s" % device) self._initialize_model() def _initialize_model(self): from espnet_model_zoo.downloader import ModelDownloader from espnet2.bin.enh_inference import SeparateSpeech d = ModelDownloader() cfg = d.download_and_unpack(self.model_name) self.separate_speech = SeparateSpeech( train_config=cfg["train_config"], model_file=cfg["model_file"], # for segment-wise process on long speech segment_size=2.4, hop_size=0.8, normalize_segment_scale=False, show_progressbar=True, ref_channel=None, normalize_output_wav=True, device=self.device, ) def inference(self, speech_path, ref_channel=0): speech, sr = soundfile.read(speech_path) speech = speech[:, ref_channel] # speech = torch.from_numpy(speech) # assert speech.dim() == 1 enh_speech = self.separate_speech(speech[None, ...], fs=sr) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") # if len(enh_speech) == 1: soundfile.write(audio_filename, enh_speech[0].squeeze(), samplerate=sr) # return enh_speech[0] # return enh_speech # else: # print("############") # audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") # soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr) # audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") # soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr) # audio_filename = merge_audio(audio_filename_1, audio_filename_2) return audio_filename class Speech_SS: def __init__(self, device="cuda", model_name="lichenda/wsj0_2mix_skim_noncausal"): self.model_name = model_name self.device = device print("Initializing ESPnet SS to %s" % device) self._initialize_model() def _initialize_model(self): from espnet_model_zoo.downloader import ModelDownloader from espnet2.bin.enh_inference import SeparateSpeech d = ModelDownloader() cfg = d.download_and_unpack(self.model_name) self.separate_speech = SeparateSpeech( train_config=cfg["train_config"], model_file=cfg["model_file"], # for segment-wise process on long speech segment_size=2.4, hop_size=0.8, normalize_segment_scale=False, show_progressbar=True, ref_channel=None, normalize_output_wav=True, device=self.device, ) def inference(self, speech_path): speech, sr = soundfile.read(speech_path) enh_speech = self.separate_speech(speech[None, ...], fs=sr) audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") if len(enh_speech) == 1: soundfile.write(audio_filename, enh_speech[0], samplerate=sr) else: # print("############") audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr) audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav") soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr) 
audio_filename = merge_audio(audio_filename_1, audio_filename_2) return audio_filename class ConversationBot: def __init__(self): print("Initializing AudioGPT") self.llm = OpenAI(temperature=0) self.t2i = T2I(device="cuda:1") self.i2t = ImageCaptioning(device="cuda:0") self.t2a = T2A(device="cuda:0") self.tts = TTS(device="cpu") self.t2s = T2S(device="cpu") self.i2a = I2A(device="cuda:0") self.a2t = A2T(device="cpu") self.asr = ASR(device="cuda:0") self.SE_SS_SC = Speech_Enh_SS_SC(device="cuda:0") # self.SE_SS_MC = Speech_Enh_SS_MC(device="cuda:0") self.SS = Speech_SS(device="cuda:0") self.inpaint = Inpaint(device="cuda:0") self.tts_ood = TTS_OOD(device="cpu") self.geneface = GeneFace(device="cuda:0") self.detection = SoundDetection(device="cpu") self.binaural = Binaural(device="cuda:0") self.extraction = SoundExtraction(device="cuda:0") self.TSD = TargetSoundDetection(device="cuda:0") self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output') def init_tools(self, interaction_type): if interaction_type == 'text': self.tools = [ Tool(name="Generate Image From User Input Text", func=self.t2i.inference, description="useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. " "The input to this tool should be a string, representing the text used to generate image. "), Tool(name="Get Photo Description", func=self.i2t.inference, description="useful for when you want to know what is inside the photo. receives image_path as input. " "The input to this tool should be a string, representing the image_path. "), Tool(name="Generate Audio From User Input Text", func=self.t2a.inference, description="useful for when you want to generate an audio from a user input text and it saved it to a file." "The input to this tool should be a string, representing the text used to generate audio."), Tool( name="Style Transfer", func= self.tts_ood.inference, description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice." "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx." "The input to this tool should be a comma seperated string of two, representing reference audio path and input text."), Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference, description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file." "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ." "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. " "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx." "The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."), Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference, description="useful for when you want to convert a user input text into speech audio it saved it to a file." 
"The input to this tool should be a string, representing the text used to be converted to speech."), # Tool(name="Speech Enhancement Or Separation In Single-Channel", func=self.SE_SS_SC.inference, # description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), " # "or separate each speech from the speech mixture (single-channel), receives audio_path as input." # "The input to this tool should be a string, representing the audio_path."), Tool(name="Speech Enhancement In Single-Channel", func=self.SE_SS_SC.inference, description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Speech Separation In Single-Channel", func=self.SS.inference, description="useful for when you want to separate each speech from the speech mixture, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), # Tool(name="Speech Enhancement In Multi-Channel", func=self.SE_SS_MC.inference, # description="useful for when you want to enhance the quality of the speech signal by reducing background noise (multi-channel), receives audio_path as input." # "The input to this tool should be a string, representing the audio_path."), Tool(name="Generate Audio From The Image", func=self.i2a.inference, description="useful for when you want to generate an audio based on an image." "The input to this tool should be a string, representing the image_path. "), Tool(name="Generate Text From The Audio", func=self.a2t.inference, description="useful for when you want to describe an audio in text, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Audio Inpainting", func=self.inpaint.show_mel_fn, description="useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, " "The input to this tool should be a string, representing the audio_path."), Tool(name="Transcribe Speech", func=self.asr.inference, description="useful for when you want to know the text corresponding to a human speech, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference, description="useful for when you want to generate a talking human portrait video given a input audio." "The input to this tool should be a string, representing the audio_path."), Tool(name="Detect The Sound Event From The Audio", func=self.detection.inference, description="useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. " "The input to this tool should be a string, representing the audio_path. "), Tool(name="Sythesize Binaural Audio From A Mono Audio Input", func=self.binaural.inference, description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. " "The input to this tool should be a string, representing the audio_path. 
"), Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference, description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. " "The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."), Tool(name="Target Sound Detection", func=self.TSD.inference, description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. " "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")] self.agent = initialize_agent( self.tools, self.llm, agent="conversational-react-description", verbose=True, memory=self.memory, return_intermediate_steps=True, agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, ) return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False) else: self.tools = [ Tool(name="Generate Audio From User Input Text", func=self.t2a.inference, description="useful for when you want to generate an audio from a user input text and it saved it to a file." "The input to this tool should be a string, representing the text used to generate audio."), Tool( name="Style Transfer", func= self.tts_ood.inference, description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice." "Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx." "The input to this tool should be a comma seperated string of two, representing reference audio path and input text."), Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference, description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file." "If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ." "If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. " "Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx." "The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."), Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference, description="useful for when you want to convert a user input text into speech audio it saved it to a file." "The input to this tool should be a string, representing the text used to be converted to speech."), Tool(name="Generate Text From The Audio", func=self.a2t.inference, description="useful for when you want to describe an audio in text, receives audio_path as input." "The input to this tool should be a string, representing the audio_path."), Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference, description="useful for when you want to generate a talking human portrait video given a input audio." 
"The input to this tool should be a string, representing the audio_path."), Tool(name="Generate Binaural Audio From A Mono Audio Input", func=self.binaural.inference, description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. " "The input to this tool should be a string, representing the audio_path. "), Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference, description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. " "The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."), Tool(name="Target Sound Detection", func=self.TSD.inference, description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. " "The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")] self.agent = initialize_agent( self.tools, self.llm, agent="conversational-react-description", verbose=True, memory=self.memory, return_intermediate_steps=True, agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, ) return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True) def run_text(self, text, state): print("===============Running run_text =============") print("Inputs:", text, state) print("======>Previous memory:\n %s" % self.agent.memory) self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500) res = self.agent({"input": text}) if res['intermediate_steps'] == []: print("======>Current memory:\n %s" % self.agent.memory) response = res['output'] state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) else: tool = res['intermediate_steps'][0][0].tool if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection": print("======>Current memory:\n %s" % self.agent.memory) response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output']) state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) elif tool == "Transcribe Speech": response = res['output'] state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) elif tool == "Detect The Sound Event From The Audio": image_filename = res['intermediate_steps'][0][1] response = res['output'] + f"![](/file={image_filename})*{image_filename}*" state = state + [(text, response)] print("Outputs:", state) return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False) elif tool == "Audio Inpainting": audio_filename = res['intermediate_steps'][0][0].tool_input image_filename = res['intermediate_steps'][0][1] print("======>Current 
memory:\n %s" % self.agent.memory)
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(value=image_filename,visible=True), gr.Button.update(visible=True)
            elif tool == "Generate a talking human portrait video given a input Audio":
                video_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return state, state, gr.Audio.update(visible=False), gr.Video.update(value=video_filename,visible=True), gr.Image.update(visible=False), gr.Button.update(visible=False)
            print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
            audio_filename = res['intermediate_steps'][0][1]
            state = state + [(text, response)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)

    def run_image_or_audio(self, file, state, txt):
        file_type = file.name[-3:]
        if file_type == "wav":
            print("===============Running run_audio =============")
            print("Inputs:", file, state)
            print("======>Previous memory:\n %s" % self.agent.memory)
            audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
            # audio_load = whisper.load_audio(file.name)
            audio_load, sr = soundfile.read(file.name)
            soundfile.write(audio_filename, audio_load, samplerate = sr)
            description = self.a2t.inference(audio_filename)
            Human_prompt = "\nHuman: provide an audio named {}. The description is: {}. This information helps you to understand this audio, but you should use tools to finish following tasks, " \
                           "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(audio_filename, description)
            AI_prompt = "Received. "
            self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
            print("======>Current memory:\n %s" % self.agent.memory)
            # state = state + [(f"<audio src=audio_filename controls=controls></audio>*{audio_filename}*", AI_prompt)]
            state = state + [(f"*{audio_filename}*", AI_prompt)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False)
        else:
            print("===============Running run_image =============")
            print("Inputs:", file, state)
            print("======>Previous memory:\n %s" % self.agent.memory)
            image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
            print("======>Auto Resize Image...")
            img = Image.open(file.name)
            width, height = img.size
            ratio = min(512 / width, 512 / height)
            width_new, height_new = (round(width * ratio), round(height * ratio))
            img = img.resize((width_new, height_new))
            img = img.convert('RGB')
            img.save(image_filename, "PNG")
            print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
            description = self.i2t.inference(image_filename)
            Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
                           "rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
            AI_prompt = "Received. "
            self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
            print("======>Current memory:\n %s" % self.agent.memory)
            state = state + [(f"![](/file={image_filename})*{image_filename}*", AI_prompt)]
            print("Outputs:", state)
            return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False)

    def speech(self, speech_input, state):
        input_audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
        text = self.asr.translate_english(speech_input)
        print("Inputs:", text, state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
        res = self.agent({"input": text})
        if res['intermediate_steps'] == []:
            print("======>Current memory:\n %s" % self.agent.memory)
            response = res['output']
            output_audio_filename = self.tts.inference(response)
            state = state + [(text, response)]
            print("Outputs:", state)
            return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
        else:
            tool = res['intermediate_steps'][0][0].tool
            if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
                print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
                output_audio_filename = self.tts.inference(res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
            elif tool == "Transcribe Speech":
                print("======>Current memory:\n %s" % self.agent.memory)
                output_audio_filename = self.tts.inference(res['output'])
                response = res['output']
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
            elif tool == "Detect The Sound Event From The Audio":
                print("======>Current memory:\n %s" % self.agent.memory)
                image_filename = res['intermediate_steps'][0][1]
                output_audio_filename = self.tts.inference(res['output'])
                response = res['output'] + f"![](/file={image_filename})*{image_filename}*"
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
            elif tool == "Generate a talking human portrait video given a input Audio":
                video_filename = res['intermediate_steps'][0][1]
                print("======>Current memory:\n %s" % self.agent.memory)
                response = res['output']
                output_audio_filename = self.tts.inference(res['output'])
                state = state + [(text, response)]
                print("Outputs:", state)
                return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(value=video_filename,visible=True)
            print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
            audio_filename = res['intermediate_steps'][0][1]
            Res = "The audio file has been generated and the audio is "
            output_audio_filename = merge_audio(self.tts.inference(Res), audio_filename)
            print(output_audio_filename)
            state = state + [(text, response)]
            response = res['output']
            print("Outputs:", state)
            return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)

    def inpainting(self, state, audio_filename, image_filename):
        print("===============Running inpainting =============")
        print("Inputs:", state)
        print("======>Previous memory:\n %s" % self.agent.memory)
        new_image_filename, new_audio_filename = self.inpaint.inference(audio_filename, image_filename)
        AI_prompt = "Here are the predict audio and the mel spectrum." + f"*{new_audio_filename}*" + f"![](/file={new_image_filename})*{new_image_filename}*"
        output_audio_filename = self.tts.inference(AI_prompt)
        self.agent.memory.buffer = self.agent.memory.buffer + 'AI: ' + AI_prompt
        print("======>Current memory:\n %s" % self.agent.memory)
        state = state + [(f"Audio Inpainting", AI_prompt)]
        print("Outputs:", state)
        return state, state, gr.Image.update(visible=False), gr.Audio.update(value=new_audio_filename, visible=True), gr.Video.update(visible=False), gr.Button.update(visible=False)

    def clear_audio(self):
        return gr.Audio.update(value=None, visible=False)

    def clear_input_audio(self):
        return gr.Audio.update(value=None)

    def clear_image(self):
        return gr.Image.update(value=None, visible=False)

    def clear_video(self):
        return gr.Video.update(value=None, visible=False)

    def clear_button(self):
        return gr.Button.update(visible=False)


if __name__ == '__main__':
    bot = ConversationBot()
    with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
        with gr.Row():
            gr.Markdown("## AudioGPT")
        chatbot = gr.Chatbot(elem_id="chatbot", label="AudioGPT", visible=False)
        state = gr.State([])
        with gr.Row() as select_raws:
            with gr.Column(scale=0.7):
                interaction_type = gr.Radio(choices=['text', 'speech'], value='text', label='Interaction Type')
            with gr.Column(scale=0.3, min_width=0):
                select = gr.Button("Select")
        with gr.Row(visible=False) as text_input_raws:
            with gr.Column(scale=0.7):
                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
            with gr.Column(scale=0.1, min_width=0):
                run = gr.Button("🏃‍♂️Run")
            with gr.Column(scale=0.1, min_width=0):
                clear_txt = gr.Button("🔄Clear️")
            with gr.Column(scale=0.1, min_width=0):
                btn = gr.UploadButton("🖼️Upload", file_types=["image","audio"])
        with gr.Row():
            outaudio = gr.Audio(visible=False)
        with gr.Row():
            with gr.Column(scale=0.3, min_width=0):
                outvideo = gr.Video(visible=False)
        with gr.Row():
            show_mel = gr.Image(type="filepath",tool='sketch',visible=False)
        with gr.Row():
            run_button = gr.Button("Predict Masked Place",visible=False)
        with gr.Row(visible=False) as speech_input_raws:
            with gr.Column(scale=0.7):
                speech_input = gr.Audio(source="microphone", type="filepath", label="Input")
            with gr.Column(scale=0.15, min_width=0):
                submit_btn = gr.Button("🏃‍♂️Submit")
            with gr.Column(scale=0.15, min_width=0):
                clear_speech = gr.Button("🔄Clear️")
            with gr.Row():
                speech_output = gr.Audio(label="Output",visible=False)
        select.click(bot.init_tools, [interaction_type], [chatbot, select_raws, text_input_raws, speech_input_raws])
        txt.submit(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
        txt.submit(lambda: "", None, txt)
        run.click(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
        run.click(lambda: "", None, txt)
        btn.upload(bot.run_image_or_audio, [btn, state, txt], [chatbot, state, outaudio, outvideo])
        run_button.click(bot.inpainting, [state, outaudio, show_mel], [chatbot, state, show_mel, outaudio, outvideo, run_button])
        clear_txt.click(bot.memory.clear)
        clear_txt.click(lambda: [], None, chatbot)
        clear_txt.click(lambda: [], None, state)
        clear_txt.click(lambda:None, None, txt)
        clear_txt.click(bot.clear_button, None, run_button)
        clear_txt.click(bot.clear_image, None, show_mel)
        clear_txt.click(bot.clear_audio, None, outaudio)
        clear_txt.click(bot.clear_video, None, outvideo)
        submit_btn.click(bot.speech, [speech_input, state], [speech_input, speech_output, state, outvideo])
        clear_speech.click(bot.clear_input_audio, None, speech_input)
        clear_speech.click(bot.clear_audio, None, speech_output)
        clear_speech.click(lambda: [], None, state)
        clear_speech.click(bot.clear_video, None, outvideo)
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
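The code column above drives everything through the four langchain entry points listed in the apis column that follows (OpenAI, ConversationBufferMemory, Tool, initialize_agent). As a quick orientation, here is a minimal sketch of that same legacy-LangChain agent pattern reduced to a single tool. It assumes the old langchain release this row was written against and an OPENAI_API_KEY in the environment; it drops the AUDIO_CHATGPT_* prompt kwargs, and fake_tts is a hypothetical stand-in for a model wrapper such as self.tts.inference, not part of the original code.

import os

from langchain.llms.openai import OpenAI
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.agents.tools import Tool
from langchain.agents.initialize import initialize_agent


def fake_tts(text):
    # Hypothetical stand-in for a wrapper like self.tts.inference: pretend we
    # synthesized speech and return the path the audio would be written to.
    return os.path.join("audio", "demo.wav")


llm = OpenAI(temperature=0)
memory = ConversationBufferMemory(memory_key="chat_history", output_key="output")
tools = [
    Tool(name="Synthesize Speech Given the User Input Text", func=fake_tts,
         description="useful for when you want to convert a user input text into speech audio. "
                     "The input to this tool should be a string."),
]
agent = initialize_agent(
    tools,
    llm,
    agent="conversational-react-description",
    verbose=True,
    memory=memory,
    return_intermediate_steps=True,
)

# Invoked the same way the row's run_text/speech methods call self.agent.
res = agent({"input": "Please read this sentence out loud."})
print(res["output"])
print(res["intermediate_steps"])

The returned dictionary carries both 'output' and 'intermediate_steps', which is exactly what run_text and speech branch on above to decide which Gradio components to update.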
[ "langchain.llms.openai.OpenAI", "langchain.chains.conversation.memory.ConversationBufferMemory", "langchain.agents.initialize.initialize_agent", "langchain.agents.tools.Tool" ]
[((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'), ((4131, 4155), 'numpy.hstack', 'np.hstack', (['merged_signal'], {}), '(merged_signal)\n', (4140, 4155), True, 'import numpy as np\n'), ((4176, 4217), 'numpy.asarray', 'np.asarray', (['merged_signal'], {'dtype': 'np.int16'}), '(merged_signal, dtype=np.int16)\n', (4186, 4217), True, 'import numpy as np\n'), ((4298, 4348), 'scipy.io.wavfile.write', 'wavfile.write', (['audio_filename', 'sr_2', 'merged_signal'], {}), '(audio_filename, sr_2, merged_signal)\n', (4311, 4348), True, 'import scipy.io.wavfile as wavfile\n'), ((53, 79), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((4682, 4786), 'diffusers.StableDiffusionPipeline.from_pretrained', 'StableDiffusionPipeline.from_pretrained', (['"""runwayml/stable-diffusion-v1-5"""'], {'torch_dtype': 'torch.float16'}), "('runwayml/stable-diffusion-v1-5',\n torch_dtype=torch.float16)\n", (4721, 4786), False, 'from diffusers import StableDiffusionPipeline\n'), ((4820, 4892), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4849, 4892), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((4926, 5005), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['"""Gustavosta/MagicPrompt-Stable-Diffusion"""'], {}), "('Gustavosta/MagicPrompt-Stable-Diffusion')\n", (4962, 5005), False, 'from transformers import AutoModelForCausalLM, AutoTokenizer\n'), ((5043, 5163), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'self.text_refine_model', 'tokenizer': 'self.text_refine_tokenizer', 'device': 'self.device'}), "('text-generation', model=self.text_refine_model, tokenizer=self.\n text_refine_tokenizer, device=self.device)\n", (5051, 5163), False, 'from transformers import pipeline\n'), ((5875, 5945), 'transformers.BlipProcessor.from_pretrained', 'BlipProcessor.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "('Salesforce/blip-image-captioning-base')\n", (5904, 5945), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6704, 6792), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n device=device)\n", (6718, 6792), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((6861, 6883), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (6875, 6883), False, 'from omegaconf import OmegaConf\n'), ((6900, 6937), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (6923, 6937), False, 'from ldm.util import instantiate_from_config\n'), ((7187, 7205), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (7198, 7205), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((7378, 7405), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7399, 7405), True, 'import numpy as np\n'), ((8501, 8560), 'torch.clamp', 
'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (8512, 8560), False, 'import torch\n'), ((10157, 10217), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (10172, 10217), False, 'import soundfile\n'), ((10695, 10783), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n device=device)\n", (10709, 10783), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((10852, 10874), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (10866, 10874), False, 'from omegaconf import OmegaConf\n'), ((10891, 10928), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (10914, 10928), False, 'from ldm.util import instantiate_from_config\n'), ((11178, 11196), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (11189, 11196), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((11399, 11426), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (11420, 11426), True, 'import numpy as np\n'), ((11759, 11776), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (11769, 11776), False, 'from PIL import Image\n'), ((12720, 12779), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (12731, 12779), False, 'import torch\n'), ((13377, 13437), 'soundfile.write', 'soundfile.write', (['audio_filename', 'result[1]'], {'samplerate': '(16000)'}), '(audio_filename, result[1], samplerate=16000)\n', (13392, 13437), False, 'import soundfile\n'), ((13967, 13996), 'inference.tts.PortaSpeech.TTSInference', 'TTSInference', (['self.hp', 'device'], {}), '(self.hp, device)\n', (13979, 13996), False, 'from inference.tts.PortaSpeech import TTSInference\n'), ((14039, 14095), 'utils.hparams.set_hparams', 'set_hparams', ([], {'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(exp_name=self.exp_name, print_hparams=False)\n', (14050, 14095), False, 'from utils.hparams import set_hparams\n'), ((14345, 14399), 'soundfile.write', 'soundfile.write', (['audio_filename', 'out'], {'samplerate': '(22050)'}), '(audio_filename, out, samplerate=22050)\n', (14360, 14399), False, 'import soundfile\n'), ((14913, 14948), 'inference.svs.ds_e2e.DiffSingerE2EInfer', 'DiffSingerE2EInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (14931, 14948), False, 'from inference.svs.ds_e2e import DiffSingerE2EInfer\n'), ((15398, 15474), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (15409, 15474), False, 'from utils.hparams import set_hparams\n'), ((18112, 18174), 'soundfile.write', 'soundfile.write', (['audio_filename', 'wav'], {'samplerate': 'self.model.fs'}), '(audio_filename, wav, samplerate=self.model.fs)\n', (18127, 18174), False, 'import soundfile\n'), ((18678, 18711), 'inference.tts.GenerSpeech.GenerSpeechInfer', 'GenerSpeechInfer', (['self.hp', 'device'], {}), '(self.hp, device)\n', (18694, 18711), False, 'from inference.tts.GenerSpeech import GenerSpeechInfer\n'), 
((18754, 18830), 'utils.hparams.set_hparams', 'set_hparams', ([], {'config': 'self.config', 'exp_name': 'self.exp_name', 'print_hparams': '(False)'}), '(config=self.config, exp_name=self.exp_name, print_hparams=False)\n', (18765, 18830), False, 'from utils.hparams import set_hparams\n'), ((18914, 18941), 'os.path.exists', 'os.path.exists', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18928, 18941), False, 'import os\n'), ((20145, 20233), 'vocoder.bigvgan.models.VocoderBigVGAN', 'VocoderBigVGAN', (['"""text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w"""'], {'device': 'device'}), "('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',\n device=device)\n", (20159, 20233), False, 'from vocoder.bigvgan.models import VocoderBigVGAN\n'), ((20354, 20376), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['config'], {}), '(config)\n', (20368, 20376), False, 'from omegaconf import OmegaConf\n'), ((20393, 20430), 'ldm.util.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (20416, 20430), False, 'from ldm.util import instantiate_from_config\n'), ((20737, 20755), 'ldm.models.diffusion.ddim.DDIMSampler', 'DDIMSampler', (['model'], {}), '(model)\n', (20748, 20755), False, 'from ldm.models.diffusion.ddim import DDIMSampler\n'), ((21544, 21574), 'scipy.io.wavfile.read', 'wavfile.read', (['input_audio_path'], {}), '(input_audio_path)\n', (21556, 21574), True, 'import scipy.io.wavfile as wavfile\n'), ((21855, 21915), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (21871, 21915), False, 'import librosa\n'), ((22182, 22209), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22198, 22209), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((22619, 22679), 'librosa.resample', 'librosa.resample', (['ori_wav'], {'orig_sr': 'sr', 'target_sr': 'SAMPLE_RATE'}), '(ori_wav, orig_sr=sr, target_sr=SAMPLE_RATE)\n', (22635, 22679), False, 'import librosa\n'), ((22945, 22972), 'ldm.data.extract_mel_spectrogram.TRANSFORMS_16000', 'TRANSFORMS_16000', (['input_wav'], {}), '(input_wav)\n', (22961, 22972), False, 'from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000\n'), ((23512, 23539), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (23533, 23539), True, 'import numpy as np\n'), ((23834, 23899), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (["batch['mask']"], {'size': 'c.shape[-2:]'}), "(batch['mask'], size=c.shape[-2:])\n", (23865, 23899), False, 'import torch\n'), ((23960, 23985), 'torch.cat', 'torch.cat', (['(c, cc)'], {'dim': '(1)'}), '((c, cc), dim=1)\n', (23969, 23985), False, 'import torch\n'), ((24438, 24495), 'torch.clamp', 'torch.clamp', (["((batch['mel'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mel'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24449, 24495), False, 'import torch\n'), ((24506, 24564), 'torch.clamp', 'torch.clamp', (["((batch['mask'] + 1.0) / 2.0)"], {'min': '(0.0)', 'max': '(1.0)'}), "((batch['mask'] + 1.0) / 2.0, min=0.0, max=1.0)\n", (24517, 24564), False, 'import torch\n'), ((24584, 24643), 'torch.clamp', 'torch.clamp', (['((x_samples_ddim + 1.0) / 2.0)'], {'min': '(0.0)', 'max': '(1.0)'}), '((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)\n', (24595, 24643), False, 'import torch\n'), ((24954, 24983), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', 
(24976, 24983), False, 'import torch\n'), ((25002, 25035), 'PIL.Image.open', 'Image.open', (["mel_and_mask['image']"], {}), "(mel_and_mask['image'])\n", (25012, 25035), False, 'from PIL import Image\n'), ((25055, 25087), 'PIL.Image.open', 'Image.open', (["mel_and_mask['mask']"], {}), "(mel_and_mask['mask'])\n", (25065, 25087), False, 'from PIL import Image\n'), ((25306, 25398), 'numpy.pad', 'np.pad', (['mask', '((0, 0), (0, mel_len - mask.shape[1]))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(mask, ((0, 0), (0, mel_len - mask.shape[1])), mode='constant',\n constant_values=0)\n", (25312, 25398), True, 'import numpy as np\n'), ((26269, 26327), 'soundfile.write', 'soundfile.write', (['audio_filename', 'gen_wav'], {'samplerate': '(16000)'}), '(audio_filename, gen_wav, samplerate=16000)\n', (26284, 26327), False, 'import soundfile\n'), ((26527, 26568), 'whisper.load_model', 'whisper.load_model', (['"""base"""'], {'device': 'device'}), "('base', device=device)\n", (26545, 26568), False, 'import whisper\n'), ((26623, 26653), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (26641, 26653), False, 'import whisper\n'), ((26670, 26696), 'whisper.pad_or_trim', 'whisper.pad_or_trim', (['audio'], {}), '(audio)\n', (26689, 26696), False, 'import whisper\n'), ((26831, 26856), 'whisper.DecodingOptions', 'whisper.DecodingOptions', ([], {}), '()\n', (26854, 26856), False, 'import whisper\n'), ((26874, 26914), 'whisper.decode', 'whisper.decode', (['self.model', 'mel', 'options'], {}), '(self.model, mel, options)\n', (26888, 26914), False, 'import whisper\n'), ((27312, 27373), 'audio_to_text.inference_waveform.AudioCapModel', 'AudioCapModel', (['"""audio_to_text/audiocaps_cntrstv_cnn14rnn_trm"""'], {}), "('audio_to_text/audiocaps_cntrstv_cnn14rnn_trm')\n", (27325, 27373), False, 'from audio_to_text.inference_waveform import AudioCapModel\n'), ((27427, 27457), 'whisper.load_audio', 'whisper.load_audio', (['audio_path'], {}), '(audio_path)\n', (27445, 27457), False, 'import whisper\n'), ((27863, 27884), 'audio_to_face.GeneFace_binding.GeneFaceInfer', 'GeneFaceInfer', (['device'], {}), '(device)\n', (27876, 27884), False, 'from audio_to_face.GeneFace_binding import GeneFaceInfer\n'), ((29085, 29267), 'audio_infer.pytorch.models.PVT', 'PVT', ([], {'sample_rate': 'self.sample_rate', 'window_size': 'self.window_size', 'hop_size': 'self.hop_size', 'mel_bins': 'self.mel_bins', 'fmin': 'self.fmin', 'fmax': 'self.fmax', 'classes_num': 'self.classes_num'}), '(sample_rate=self.sample_rate, window_size=self.window_size, hop_size=\n self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax,\n classes_num=self.classes_num)\n', (29088, 29267), False, 'from audio_infer.pytorch.models import PVT\n'), ((29306, 29364), 'torch.load', 'torch.load', (['self.checkpoint_path'], {'map_location': 'self.device'}), '(self.checkpoint_path, map_location=self.device)\n', (29316, 29364), False, 'import torch\n'), ((29531, 29592), 'librosa.core.load', 'librosa.core.load', (['audio_path'], {'sr': 'self.sample_rate', 'mono': '(True)'}), '(audio_path, sr=self.sample_rate, mono=True)\n', (29548, 29592), False, 'import librosa\n'), ((29672, 29698), 'torch.from_numpy', 'torch.from_numpy', (['waveform'], {}), '(waveform)\n', (29688, 29698), False, 'import torch\n'), ((30663, 30711), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 4)'}), '(2, 1, sharex=True, figsize=(10, 4))\n', (30675, 30711), True, 'import matplotlib.pyplot as plt\n'), 
((31471, 31489), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (31487, 31489), True, 'import matplotlib.pyplot as plt\n'), ((31578, 31605), 'matplotlib.pyplot.savefig', 'plt.savefig', (['image_filename'], {}), '(image_filename)\n', (31589, 31605), True, 'import matplotlib.pyplot as plt\n'), ((31952, 31958), 'sound_extraction.utils.stft.STFT', 'STFT', ([], {}), '()\n', (31956, 31958), False, 'from sound_extraction.utils.stft import STFT\n'), ((32046, 32073), 'torch.load', 'torch.load', (['self.model_file'], {}), '(self.model_file)\n', (32056, 32073), False, 'import torch\n'), ((32416, 32436), 'sound_extraction.utils.wav_io.load_wav', 'load_wav', (['audio_path'], {}), '(audio_path)\n', (32424, 32436), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33156, 33189), 'sound_extraction.utils.wav_io.save_wav', 'save_wav', (['est_wav', 'audio_filename'], {}), '(est_wav, audio_filename)\n', (33164, 33189), False, 'from sound_extraction.utils.wav_io import load_wav, save_wav\n'), ((33854, 33920), 'src.models.BinauralNetwork', 'BinauralNetwork', ([], {'view_dim': '(7)', 'warpnet_layers': '(4)', 'warpnet_channels': '(64)'}), '(view_dim=7, warpnet_layers=4, warpnet_channels=64)\n', (33869, 33920), False, 'from src.models import BinauralNetwork\n'), ((34119, 34171), 'librosa.load', 'librosa.load', ([], {'path': 'audio_path', 'sr': 'self.sr', 'mono': '(True)'}), '(path=audio_path, sr=self.sr, mono=True)\n', (34131, 34171), False, 'import librosa\n'), ((34187, 34209), 'torch.from_numpy', 'torch.from_numpy', (['mono'], {}), '(mono)\n', (34203, 34209), False, 'import torch\n'), ((34311, 34331), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (34325, 34331), False, 'import random\n'), ((34433, 34455), 'torch.from_numpy', 'torch.from_numpy', (['view'], {}), '(view)\n', (34449, 34455), False, 'import torch\n'), ((35860, 35918), 'torch.cat', 'torch.cat', (["[chunk['binaural'] for chunk in chunks]"], {'dim': '(-1)'}), "([chunk['binaural'] for chunk in chunks], dim=-1)\n", (35869, 35918), False, 'import torch\n'), ((36151, 36196), 'torchaudio.save', 'torchaudio.save', (['audio_filename', 'binaural', 'sr'], {}), '(audio_filename, binaural, sr)\n', (36166, 36196), False, 'import torchaudio\n'), ((36806, 36819), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (36816, 36819), True, 'import numpy as np\n'), ((36849, 36890), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'self.device'}), "('ViT-B/32', device=self.device)\n", (36858, 36890), False, 'import clip\n'), ((37034, 37147), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth"""'], {'map_location': '"""cpu"""'}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth',\n map_location='cpu')\n", (37044, 37147), False, 'import torch\n'), ((37460, 37610), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt"""'], {'map_location': '(lambda storage, loc: storage)'}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'\n , map_location=lambda storage, loc: storage)\n", (37470, 37610), False, 'import torch\n'), ((38016, 38103), 'torch.load', 'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth"""'], {}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')\n", (38026, 38103), False, 'import torch\n'), ((38122, 38208), 'torch.load', 
'torch.load', (['"""audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth"""'], {}), "(\n 'audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')\n", (38132, 38208), False, 'import torch\n'), ((38290, 38321), 'soundfile.read', 'sf.read', (['fname'], {'dtype': '"""float32"""'}), "(fname, dtype='float32')\n", (38297, 38321), True, 'import soundfile as sf\n'), ((38439, 38469), 'librosa.resample', 'librosa.resample', (['y', 'sr', '(22050)'], {}), '(y, sr, 22050)\n', (38455, 38469), False, 'import librosa\n'), ((39559, 39586), 'torch.from_numpy', 'torch.from_numpy', (['embedding'], {}), '(embedding)\n', (39575, 39586), False, 'import torch\n'), ((39796, 39820), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (39812, 39820), False, 'import torch\n'), ((40169, 40218), 'target_sound_detection.src.utils.median_filter', 'median_filter', (['pred'], {'window_size': '(1)', 'threshold': '(0.5)'}), '(pred, window_size=1, threshold=0.5)\n', (40182, 40218), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((45032, 45049), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (45047, 45049), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((45135, 45379), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n 'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n True, device=self.device)\n", (45149, 45379), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((45613, 45640), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (45627, 45640), False, 'import soundfile\n'), ((47031, 47048), 'espnet_model_zoo.downloader.ModelDownloader', 'ModelDownloader', ([], {}), '()\n', (47046, 47048), False, 'from espnet_model_zoo.downloader import ModelDownloader\n'), ((47134, 47378), 'espnet2.bin.enh_inference.SeparateSpeech', 'SeparateSpeech', ([], {'train_config': "cfg['train_config']", 'model_file': "cfg['model_file']", 'segment_size': '(2.4)', 'hop_size': '(0.8)', 'normalize_segment_scale': '(False)', 'show_progressbar': '(True)', 'ref_channel': 'None', 'normalize_output_wav': '(True)', 'device': 'self.device'}), "(train_config=cfg['train_config'], model_file=cfg[\n 'model_file'], segment_size=2.4, hop_size=0.8, normalize_segment_scale=\n False, show_progressbar=True, ref_channel=None, normalize_output_wav=\n True, device=self.device)\n", (47148, 47378), False, 'from espnet2.bin.enh_inference import SeparateSpeech\n'), ((47597, 47624), 'soundfile.read', 'soundfile.read', (['speech_path'], {}), '(speech_path)\n', (47611, 47624), False, 'import soundfile\n'), ((48487, 48508), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (48493, 48508), False, 'from langchain.llms.openai import OpenAI\n'), ((49380, 49452), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (49404, 49452), False, 'from 
langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((75007, 75049), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75022, 75049), True, 'import gradio as gr\n'), ((75098, 75125), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (75113, 75125), True, 'import gradio as gr\n'), ((75168, 75210), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75183, 75210), True, 'import gradio as gr\n'), ((75253, 75295), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'None', 'visible': '(False)'}), '(value=None, visible=False)\n', (75268, 75295), True, 'import gradio as gr\n'), ((75339, 75370), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (75355, 75370), True, 'import gradio as gr\n'), ((75437, 75493), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#chatbot .overflow-y-auto{height:500px}"""'}), "(css='#chatbot .overflow-y-auto{height:500px}')\n", (75446, 75493), True, 'import gradio as gr\n'), ((75583, 75645), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""AudioGPT"""', 'visible': '(False)'}), "(elem_id='chatbot', label='AudioGPT', visible=False)\n", (75593, 75645), True, 'import gradio as gr\n'), ((75663, 75675), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (75671, 75675), True, 'import gradio as gr\n'), ((130, 156), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (146, 156), False, 'import os\n'), ((205, 231), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'import os\n'), ((293, 319), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (309, 319), False, 'import os\n'), ((399, 425), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (415, 425), False, 'import os\n'), ((493, 519), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (509, 519), False, 'import os\n'), ((9915, 9930), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9928, 9930), False, 'import torch\n'), ((13139, 13154), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13152, 13154), False, 'import torch\n'), ((18985, 19005), 'numpy.load', 'np.load', (['f0_stats_fn'], {}), '(f0_stats_fn)\n', (18992, 19005), True, 'import numpy as np\n'), ((20568, 20593), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (20591, 20593), False, 'import torch\n'), ((20544, 20564), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (20556, 20564), False, 'import torch\n'), ((20599, 20618), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (20611, 20618), False, 'import torch\n'), ((21770, 21796), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (21785, 21796), False, 'import librosa\n'), ((22054, 22113), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), '(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22060, 22113), True, 'import numpy as np\n'), ((22534, 22560), 'librosa.to_mono', 'librosa.to_mono', (['ori_wav.T'], {}), '(ori_wav.T)\n', (22549, 22560), False, 'import librosa\n'), ((22818, 22877), 'numpy.pad', 'np.pad', (['ori_wav', '(0, mel_len * hop_size)'], {'constant_values': '(0)'}), '(ori_wav, (0, mel_len * hop_size), constant_values=0)\n', (22824, 22877), True, 'import 
numpy as np\n'), ((25442, 25457), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25455, 25457), False, 'import torch\n'), ((27988, 28016), 'os.path.basename', 'os.path.basename', (['audio_path'], {}), '(audio_path)\n', (28004, 28016), False, 'import os\n'), ((29774, 29789), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (29787, 29789), False, 'import torch\n'), ((31019, 31067), 'numpy.arange', 'np.arange', (['(0)', 'frames_num', 'self.frames_per_second'], {}), '(0, frames_num, self.frames_per_second)\n', (31028, 31067), True, 'import numpy as np\n'), ((31105, 31154), 'numpy.arange', 'np.arange', (['(0)', '(frames_num / self.frames_per_second)'], {}), '(0, frames_num / self.frames_per_second)\n', (31114, 31154), True, 'import numpy as np\n'), ((31187, 31206), 'numpy.arange', 'np.arange', (['(0)', 'top_k'], {}), '(0, top_k)\n', (31196, 31206), True, 'import numpy as np\n'), ((40409, 40472), 'target_sound_detection.src.utils.decode_with_timestamps', 'decode_with_timestamps', (['target_event', 'filtered_pred[index_k, :]'], {}), '(target_event, filtered_pred[index_k, :])\n', (40431, 40472), False, 'from target_sound_detection.src.utils import median_filter, decode_with_timestamps\n'), ((47818, 47879), 'soundfile.write', 'soundfile.write', (['audio_filename', 'enh_speech[0]'], {'samplerate': 'sr'}), '(audio_filename, enh_speech[0], samplerate=sr)\n', (47833, 47879), False, 'import soundfile\n'), ((57651, 57951), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix':\n AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (57667, 57951), False, 'from langchain.agents.initialize import initialize_agent\n'), ((62445, 62745), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix':\n AUDIO_CHATGPT_PREFIX, 'format_instructions':\n AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX})\n", (62461, 62745), False, 'from langchain.agents.initialize import initialize_agent\n'), ((67323, 67348), 'soundfile.read', 'soundfile.read', (['file.name'], {}), '(file.name)\n', (67337, 67348), False, 'import soundfile\n'), ((67361, 67419), 'soundfile.write', 'soundfile.write', (['audio_filename', 'audio_load'], {'samplerate': 'sr'}), '(audio_filename, audio_load, samplerate=sr)\n', (67376, 67419), False, 'import soundfile\n'), ((68723, 68744), 'PIL.Image.open', 'Image.open', (['file.name'], {}), '(file.name)\n', (68733, 68744), False, 'from PIL import Image\n'), ((74811, 74841), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74826, 
74841), True, 'import gradio as gr\n'), ((74843, 74898), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'new_audio_filename', 'visible': '(True)'}), '(value=new_audio_filename, visible=True)\n', (74858, 74898), True, 'import gradio as gr\n'), ((74900, 74930), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74915, 74930), True, 'import gradio as gr\n'), ((74932, 74963), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (74948, 74963), True, 'import gradio as gr\n'), ((75516, 75524), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75522, 75524), True, 'import gradio as gr\n'), ((75538, 75564), 'gradio.Markdown', 'gr.Markdown', (['"""## AudioGPT"""'], {}), "('## AudioGPT')\n", (75549, 75564), True, 'import gradio as gr\n'), ((75690, 75698), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (75696, 75698), True, 'import gradio as gr\n'), ((75985, 76006), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (75991, 76006), True, 'import gradio as gr\n'), ((76544, 76552), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76550, 76552), True, 'import gradio as gr\n'), ((76577, 76600), 'gradio.Audio', 'gr.Audio', ([], {'visible': '(False)'}), '(visible=False)\n', (76585, 76600), True, 'import gradio as gr\n'), ((76614, 76622), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76620, 76622), True, 'import gradio as gr\n'), ((76740, 76748), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76746, 76748), True, 'import gradio as gr\n'), ((76773, 76828), 'gradio.Image', 'gr.Image', ([], {'type': '"""filepath"""', 'tool': '"""sketch"""', 'visible': '(False)'}), "(type='filepath', tool='sketch', visible=False)\n", (76781, 76828), True, 'import gradio as gr\n'), ((76840, 76848), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (76846, 76848), True, 'import gradio as gr\n'), ((76875, 76923), 'gradio.Button', 'gr.Button', (['"""Predict Masked Place"""'], {'visible': '(False)'}), "('Predict Masked Place', visible=False)\n", (76884, 76923), True, 'import gradio as gr\n'), ((76945, 76966), 'gradio.Row', 'gr.Row', ([], {'visible': '(False)'}), '(visible=False)\n', (76951, 76966), True, 'import gradio as gr\n'), ((5967, 6057), 'transformers.BlipForConditionalGeneration.from_pretrained', 'BlipForConditionalGeneration.from_pretrained', (['"""Salesforce/blip-image-captioning-base"""'], {}), "(\n 'Salesforce/blip-image-captioning-base')\n", (6011, 6057), False, 'from transformers import BlipProcessor, BlipForConditionalGeneration\n'), ((6968, 7004), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (6978, 7004), False, 'import torch\n'), ((7534, 7562), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (7550, 7562), False, 'import torch\n'), ((9137, 9162), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9160, 9162), False, 'import torch\n'), ((9661, 9681), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (9669, 9681), True, 'import numpy as np\n'), ((10959, 10995), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (10969, 10995), False, 'import torch\n'), ((11555, 11583), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (11571, 11583), False, 'import torch\n'), ((13731, 13756), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13754, 13756), False, 'import torch\n'), ((14598, 14623), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14621, 14623), False, 'import torch\n'), ((16575, 16600), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16598, 16600), False, 'import torch\n'), ((18374, 18399), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18397, 18399), False, 'import torch\n'), ((20461, 20497), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (20471, 20497), False, 'import torch\n'), ((23657, 23685), 'torch.from_numpy', 'torch.from_numpy', (['start_code'], {}), '(start_code)\n', (23673, 23685), False, 'import torch\n'), ((26711, 26745), 'whisper.log_mel_spectrogram', 'whisper.log_mel_spectrogram', (['audio'], {}), '(audio)\n', (26738, 26745), False, 'import whisper\n'), ((27767, 27792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27790, 27792), False, 'import torch\n'), ((30235, 30267), 'numpy.max', 'np.max', (['framewise_output'], {'axis': '(0)'}), '(framewise_output, axis=0)\n', (30241, 30267), True, 'import numpy as np\n'), ((30742, 30754), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (30748, 30754), True, 'import numpy as np\n'), ((31244, 31265), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (31252, 31265), True, 'import numpy as np\n'), ((32456, 32478), 'torch.tensor', 'torch.tensor', (['waveform'], {}), '(waveform)\n', (32468, 32478), False, 'import torch\n'), ((34703, 34725), 'random.randint', 'random.randint', (['(0)', 'm_a'], {}), '(0, m_a)\n', (34717, 34725), False, 'import random\n'), ((35521, 35536), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (35534, 35536), False, 'import torch\n'), ((35938, 35974), 'torch.clamp', 'torch.clamp', (['binaural'], {'min': '(-1)', 'max': '(1)'}), '(binaural, min=-1, max=1)\n', (35949, 35974), False, 'import torch\n'), ((38646, 38665), 'clip.tokenize', 'clip.tokenize', (['text'], {}), '(text)\n', (38659, 38665), False, 'import clip\n'), ((49580, 49968), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Image From User Input Text"""', 'func': 'self.t2i.inference', 'description': '"""useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. """'}), "(name='Generate Image From User Input Text', func=self.t2i.inference,\n description=\n 'useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. The input to this tool should be a string, representing the text used to generate image. '\n )\n", (49584, 49968), False, 'from langchain.agents.tools import Tool\n'), ((50029, 50275), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Get Photo Description"""', 'func': 'self.i2t.inference', 'description': '"""useful for when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. """'}), "(name='Get Photo Description', func=self.i2t.inference, description=\n 'useful for when you want to know what is inside the photo. receives image_path as input. The input to this tool should be a string, representing the image_path. 
'\n )\n", (50033, 50275), False, 'from langchain.agents.tools import Tool\n'), ((50340, 50626), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n description=\n 'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n )\n", (50344, 50626), False, 'from langchain.agents.tools import Tool\n'), ((50687, 51161), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n 'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n )\n", (50691, 51161), False, 'from langchain.agents.tools import Tool\n'), ((51281, 52061), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n func=self.t2s.inference, description=\n \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n )\n', (51285, 52061), False, 'from langchain.agents.tools import Tool\n'), ((52228, 52530), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n inference, description=\n 'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n )\n", (52232, 52530), False, 'from langchain.agents.tools import Tool\n'), ((53100, 53426), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Enhancement In Single-Channel"""', 'func': 'self.SE_SS_SC.inference', 'description': '"""useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Enhancement In Single-Channel', func=self.SE_SS_SC.\n inference, description=\n 'useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (53104, 53426), False, 'from langchain.agents.tools import Tool\n'), ((53486, 53762), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Speech Separation In Single-Channel"""', 'func': 'self.SS.inference', 'description': '"""useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Speech Separation In Single-Channel', func=self.SS.inference,\n description=\n 'useful for when you want to separate each speech from the speech mixture, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (53490, 53762), False, 'from langchain.agents.tools import Tool\n'), ((54246, 54479), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From The Image"""', 'func': 'self.i2a.inference', 'description': '"""useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. """'}), "(name='Generate Audio From The Image', func=self.i2a.inference,\n description=\n 'useful for when you want to generate an audio based on an image.The input to this tool should be a string, representing the image_path. 
'\n )\n", (54250, 54479), False, 'from langchain.agents.tools import Tool\n'), ((54541, 54792), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n description=\n 'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (54545, 54792), False, 'from langchain.agents.tools import Tool\n'), ((54854, 55191), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Audio Inpainting"""', 'func': 'self.inpaint.show_mel_fn', 'description': '"""useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path."""'}), "(name='Audio Inpainting', func=self.inpaint.show_mel_fn, description=\n 'useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, The input to this tool should be a string, representing the audio_path.'\n )\n", (54858, 55191), False, 'from langchain.agents.tools import Tool\n'), ((55257, 55513), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Transcribe Speech"""', 'func': 'self.asr.inference', 'description': '"""useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Transcribe Speech', func=self.asr.inference, description=\n 'useful for when you want to know the text corresponding to a human speech, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (55261, 55513), False, 'from langchain.agents.tools import Tool\n'), ((55578, 55869), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n func=self.geneface.inference, description=\n 'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n )\n", (55582, 55869), False, 'from langchain.agents.tools import Tool\n'), ((55930, 56296), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Detect The Sound Event From The Audio"""', 'func': 'self.detection.inference', 'description': '"""useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. 
"""'}), "(name='Detect The Sound Event From The Audio', func=self.detection.\n inference, description=\n 'useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n )\n", (55934, 56296), False, 'from langchain.agents.tools import Tool\n'), ((56356, 56654), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Sythesize Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. """'}), "(name='Sythesize Binaural Audio From A Mono Audio Input', func=self.\n binaural.inference, description=\n 'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n )\n", (56360, 56654), False, 'from langchain.agents.tools import Tool\n'), ((56714, 57120), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n 'Extract Sound Event From Mixture Audio Based On Language Description',\n func=self.extraction.inference, description=\n 'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n )\n", (56718, 57120), False, 'from langchain.agents.tools import Tool\n'), ((57176, 57569), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n 'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n )\n", (57180, 57569), False, 'from langchain.agents.tools import Tool\n'), ((58070, 58093), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58079, 58093), True, 'import gradio as gr\n'), ((58095, 58119), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58104, 58119), True, 'import gradio as gr\n'), ((58121, 58144), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (58130, 58144), True, 'import gradio as gr\n'), ((58146, 58170), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (58155, 58170), True, 'import gradio as gr\n'), ((58228, 58514), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Audio From User Input Text"""', 'func': 'self.t2a.inference', 'description': '"""useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio."""'}), "(name='Generate Audio From User Input Text', func=self.t2a.inference,\n description=\n 'useful for when you want to generate an audio from a user input text and it saved it to a file.The input to this tool should be a string, representing the text used to generate audio.'\n )\n", (58232, 58514), False, 'from langchain.agents.tools import Tool\n'), ((58575, 59049), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Style Transfer"""', 'func': 'self.tts_ood.inference', 'description': '"""useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text."""'}), "(name='Style Transfer', func=self.tts_ood.inference, description=\n 'useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice.Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx.The input to this tool should be a comma seperated string of two, representing reference audio path and input text.'\n )\n", (58579, 59049), False, 'from langchain.agents.tools import Tool\n'), ((59169, 59949), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Singing Voice From User Input Text, Note and Duration Sequence"""', 'func': 'self.t2s.inference', 'description': '"""useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. 
Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."""'}), '(name=\n \'Generate Singing Voice From User Input Text, Note and Duration Sequence\',\n func=self.t2s.inference, description=\n \'useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file.If Like: Generate a piece of singing voice, the input to this tool should be "" since there is no User Input Text, Note and Duration Sequence .If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx.The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided.\'\n )\n', (59173, 59949), False, 'from langchain.agents.tools import Tool\n'), ((60116, 60418), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Synthesize Speech Given the User Input Text"""', 'func': 'self.tts.inference', 'description': '"""useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech."""'}), "(name='Synthesize Speech Given the User Input Text', func=self.tts.\n inference, description=\n 'useful for when you want to convert a user input text into speech audio it saved it to a file.The input to this tool should be a string, representing the text used to be converted to speech.'\n )\n", (60120, 60418), False, 'from langchain.agents.tools import Tool\n'), ((60478, 60729), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Text From The Audio"""', 'func': 'self.a2t.inference', 'description': '"""useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate Text From The Audio', func=self.a2t.inference,\n description=\n 'useful for when you want to describe an audio in text, receives audio_path as input.The input to this tool should be a string, representing the audio_path.'\n )\n", (60482, 60729), False, 'from langchain.agents.tools import Tool\n'), ((60791, 61082), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate a talking human portrait video given a input Audio"""', 'func': 'self.geneface.inference', 'description': '"""useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path."""'}), "(name='Generate a talking human portrait video given a input Audio',\n func=self.geneface.inference, description=\n 'useful for when you want to generate a talking human portrait video given a input audio.The input to this tool should be a string, representing the audio_path.'\n )\n", (60795, 61082), False, 'from langchain.agents.tools import Tool\n'), ((61143, 61440), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Generate Binaural Audio From A Mono Audio Input"""', 'func': 'self.binaural.inference', 'description': '"""useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. 
"""'}), "(name='Generate Binaural Audio From A Mono Audio Input', func=self.\n binaural.inference, description=\n 'useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. The input to this tool should be a string, representing the audio_path. '\n )\n", (61147, 61440), False, 'from langchain.agents.tools import Tool\n'), ((61500, 61906), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Extract Sound Event From Mixture Audio Based On Language Description"""', 'func': 'self.extraction.inference', 'description': '"""useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."""'}), "(name=\n 'Extract Sound Event From Mixture Audio Based On Language Description',\n func=self.extraction.inference, description=\n 'useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. The input to this tool should be a comma seperated string of two, representing mixture audio path and input text.'\n )\n", (61504, 61906), False, 'from langchain.agents.tools import Tool\n'), ((61962, 62355), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Target Sound Detection"""', 'func': 'self.TSD.inference', 'description': '"""useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. """'}), "(name='Target Sound Detection', func=self.TSD.inference, description=\n 'useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. The input to this tool should be a comma seperated string of two, representing audio path and the text description. 
'\n )\n", (61966, 62355), False, 'from langchain.agents.tools import Tool\n'), ((62864, 62888), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62873, 62888), True, 'import gradio as gr\n'), ((62890, 62914), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62899, 62914), True, 'import gradio as gr\n'), ((62916, 62940), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (62925, 62940), True, 'import gradio as gr\n'), ((62942, 62965), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (62951, 62965), True, 'import gradio as gr\n'), ((63585, 63615), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63600, 63615), True, 'import gradio as gr\n'), ((63617, 63647), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63632, 63647), True, 'import gradio as gr\n'), ((63649, 63679), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63664, 63679), True, 'import gradio as gr\n'), ((63681, 63712), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (63697, 63712), True, 'import gradio as gr\n'), ((66706, 66757), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (66721, 66757), True, 'import gradio as gr\n'), ((66758, 66788), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66773, 66788), True, 'import gradio as gr\n'), ((66790, 66820), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66805, 66820), True, 'import gradio as gr\n'), ((66822, 66853), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66838, 66853), True, 'import gradio as gr\n'), ((68295, 68346), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (68310, 68346), True, 'import gradio as gr\n'), ((68347, 68377), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (68362, 68377), True, 'import gradio as gr\n'), ((69917, 69947), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69932, 69947), True, 'import gradio as gr\n'), ((69949, 69979), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (69964, 69979), True, 'import gradio as gr\n'), ((70735, 70762), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (70750, 70762), True, 'import gradio as gr\n'), ((70764, 70822), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (70779, 70822), True, 'import gradio as gr\n'), ((70830, 70860), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (70845, 70860), True, 'import gradio as gr\n'), ((73858, 73885), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73873, 73885), True, 'import gradio as gr\n'), ((73887, 73945), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73902, 73945), True, 'import gradio as gr\n'), 
((73953, 73983), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (73968, 73983), True, 'import gradio as gr\n'), ((75732, 75752), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (75741, 75752), True, 'import gradio as gr\n'), ((75789, 75865), 'gradio.Radio', 'gr.Radio', ([], {'choices': "['text', 'speech']", 'value': '"""text"""', 'label': '"""Interaction Type"""'}), "(choices=['text', 'speech'], value='text', label='Interaction Type')\n", (75797, 75865), True, 'import gradio as gr\n'), ((75883, 75916), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (75892, 75916), True, 'import gradio as gr\n'), ((75943, 75962), 'gradio.Button', 'gr.Button', (['"""Select"""'], {}), "('Select')\n", (75952, 75962), True, 'import gradio as gr\n'), ((76044, 76064), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (76053, 76064), True, 'import gradio as gr\n'), ((76219, 76252), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76228, 76252), True, 'import gradio as gr\n'), ((76276, 76301), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Run"""'], {}), "('🏃\\u200d♂️Run')\n", (76285, 76301), True, 'import gradio as gr\n'), ((76314, 76347), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76323, 76347), True, 'import gradio as gr\n'), ((76377, 76397), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (76386, 76397), True, 'import gradio as gr\n'), ((76415, 76448), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.1)', 'min_width': '(0)'}), '(scale=0.1, min_width=0)\n', (76424, 76448), True, 'import gradio as gr\n'), ((76472, 76530), 'gradio.UploadButton', 'gr.UploadButton', (['"""🖼️Upload"""'], {'file_types': "['image', 'audio']"}), "('🖼️Upload', file_types=['image', 'audio'])\n", (76487, 76530), True, 'import gradio as gr\n'), ((76641, 76674), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)', 'min_width': '(0)'}), '(scale=0.3, min_width=0)\n', (76650, 76674), True, 'import gradio as gr\n'), ((76703, 76726), 'gradio.Video', 'gr.Video', ([], {'visible': '(False)'}), '(visible=False)\n', (76711, 76726), True, 'import gradio as gr\n'), ((77007, 77027), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (77016, 77027), True, 'import gradio as gr\n'), ((77060, 77121), 'gradio.Audio', 'gr.Audio', ([], {'source': '"""microphone"""', 'type': '"""filepath"""', 'label': '"""Input"""'}), "(source='microphone', type='filepath', label='Input')\n", (77068, 77121), True, 'import gradio as gr\n'), ((77139, 77173), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77148, 77173), True, 'import gradio as gr\n'), ((77204, 77232), 'gradio.Button', 'gr.Button', (['"""🏃\u200d♂️Submit"""'], {}), "('🏃\\u200d♂️Submit')\n", (77213, 77232), True, 'import gradio as gr\n'), ((77245, 77279), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.15)', 'min_width': '(0)'}), '(scale=0.15, min_width=0)\n', (77254, 77279), True, 'import gradio as gr\n'), ((77312, 77332), 'gradio.Button', 'gr.Button', (['"""🔄Clear️"""'], {}), "('🔄Clear️')\n", (77321, 77332), True, 'import gradio as gr\n'), ((77350, 77358), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (77356, 77358), True, 'import gradio as gr\n'), ((77392, 77431), 'gradio.Audio', 'gr.Audio', ([], {'label': '"""Output"""', 'visible': 
'(False)'}), "(label='Output', visible=False)\n", (77400, 77431), True, 'import gradio as gr\n'), ((4265, 4277), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4275, 4277), False, 'import uuid\n'), ((6139, 6161), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6149, 6161), False, 'from PIL import Image\n'), ((20850, 20871), 'torch.from_numpy', 'torch.from_numpy', (['mel'], {}), '(mel)\n', (20866, 20871), False, 'import torch\n'), ((20926, 20948), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (20942, 20948), False, 'import torch\n'), ((31997, 32012), 'sound_extraction.model.LASSNet.LASSNet', 'LASSNet', (['device'], {}), '(device)\n', (32004, 32012), False, 'from sound_extraction.model.LASSNet import LASSNet\n'), ((38499, 38549), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['y'], {}), '(y, **self.MEL_ARGS)\n', (38529, 38549), False, 'import librosa\n'), ((64244, 64274), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64259, 64274), True, 'import gradio as gr\n'), ((64276, 64306), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64291, 64306), True, 'import gradio as gr\n'), ((64308, 64338), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64323, 64338), True, 'import gradio as gr\n'), ((64340, 64371), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64356, 64371), True, 'import gradio as gr\n'), ((71452, 71479), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71467, 71479), True, 'import gradio as gr\n'), ((71481, 71539), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71496, 71539), True, 'import gradio as gr\n'), ((71547, 71577), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (71562, 71577), True, 'import gradio as gr\n'), ((5271, 5283), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5281, 5283), False, 'import uuid\n'), ((9377, 9399), 'torch.FloatTensor', 'torch.FloatTensor', (['wav'], {}), '(wav)\n', (9394, 9399), False, 'import torch\n'), ((10120, 10132), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10130, 10132), False, 'import uuid\n'), ((13340, 13352), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13350, 13352), False, 'import uuid\n'), ((14308, 14320), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14318, 14320), False, 'import uuid\n'), ((16178, 16190), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (16188, 16190), False, 'import uuid\n'), ((18075, 18087), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (18085, 18087), False, 'import uuid\n'), ((19487, 19499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19497, 19499), False, 'import uuid\n'), ((23290, 23302), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (23300, 23302), False, 'import uuid\n'), ((26117, 26129), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26127, 26129), False, 'import uuid\n'), ((26232, 26244), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (26242, 26244), False, 'import uuid\n'), ((31541, 31553), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (31551, 31553), False, 'import uuid\n'), ((33070, 33082), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (33080, 33082), False, 'import uuid\n'), ((34346, 34386), 'numpy.loadtxt', 'np.loadtxt', (['self.position_file[rand_int]'], {}), 
'(self.position_file[rand_int])\n', (34356, 34386), True, 'import numpy as np\n'), ((36088, 36100), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (36098, 36100), False, 'import uuid\n'), ((45879, 45891), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (45889, 45891), False, 'import uuid\n'), ((47744, 47756), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47754, 47756), False, 'import uuid\n'), ((64588, 64618), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64603, 64618), True, 'import gradio as gr\n'), ((64620, 64650), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64635, 64650), True, 'import gradio as gr\n'), ((64652, 64682), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64667, 64682), True, 'import gradio as gr\n'), ((64684, 64715), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (64700, 64715), True, 'import gradio as gr\n'), ((70081, 70093), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (70091, 70093), False, 'import uuid\n'), ((71927, 71954), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (71942, 71954), True, 'import gradio as gr\n'), ((71956, 72014), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (71971, 72014), True, 'import gradio as gr\n'), ((72022, 72052), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72037, 72052), True, 'import gradio as gr\n'), ((76088, 76183), 'gradio.Textbox', 'gr.Textbox', ([], {'show_label': '(False)', 'placeholder': '"""Enter text and press enter, or upload an image"""'}), "(show_label=False, placeholder=\n 'Enter text and press enter, or upload an image')\n", (76098, 76183), True, 'import gradio as gr\n'), ((47987, 47999), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (47997, 47999), False, 'import uuid\n'), ((48159, 48171), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (48169, 48171), False, 'import uuid\n'), ((65068, 65098), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65083, 65098), True, 'import gradio as gr\n'), ((65100, 65130), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65115, 65130), True, 'import gradio as gr\n'), ((65132, 65162), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65147, 65162), True, 'import gradio as gr\n'), ((65164, 65195), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65180, 65195), True, 'import gradio as gr\n'), ((67208, 67220), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (67218, 67220), False, 'import uuid\n'), ((68627, 68639), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (68637, 68639), False, 'import uuid\n'), ((72538, 72565), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (72553, 72565), True, 'import gradio as gr\n'), ((72567, 72625), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (72582, 72625), True, 'import gradio as gr\n'), ((72633, 72663), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (72648, 72663), True, 'import gradio as gr\n'), ((65632, 65683), 
'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'audio_filename', 'visible': '(True)'}), '(value=audio_filename, visible=True)\n', (65647, 65683), True, 'import gradio as gr\n'), ((65684, 65714), 'gradio.Video.update', 'gr.Video.update', ([], {'visible': '(False)'}), '(visible=False)\n', (65699, 65714), True, 'import gradio as gr\n'), ((65716, 65767), 'gradio.Image.update', 'gr.Image.update', ([], {'value': 'image_filename', 'visible': '(True)'}), '(value=image_filename, visible=True)\n', (65731, 65767), True, 'import gradio as gr\n'), ((65768, 65798), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(True)'}), '(visible=True)\n', (65784, 65798), True, 'import gradio as gr\n'), ((73124, 73151), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'None'}), '(value=None)\n', (73139, 73151), True, 'import gradio as gr\n'), ((73153, 73211), 'gradio.Audio.update', 'gr.Audio.update', ([], {'value': 'output_audio_filename', 'visible': '(True)'}), '(value=output_audio_filename, visible=True)\n', (73168, 73211), True, 'import gradio as gr\n'), ((73219, 73270), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (73234, 73270), True, 'import gradio as gr\n'), ((66196, 66226), 'gradio.Audio.update', 'gr.Audio.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66211, 66226), True, 'import gradio as gr\n'), ((66228, 66279), 'gradio.Video.update', 'gr.Video.update', ([], {'value': 'video_filename', 'visible': '(True)'}), '(value=video_filename, visible=True)\n', (66243, 66279), True, 'import gradio as gr\n'), ((66280, 66310), 'gradio.Image.update', 'gr.Image.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66295, 66310), True, 'import gradio as gr\n'), ((66312, 66343), 'gradio.Button.update', 'gr.Button.update', ([], {'visible': '(False)'}), '(visible=False)\n', (66328, 66343), True, 'import gradio as gr\n')]
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS

loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
index = index_creator.from_documents(docs)
index.vectorstore.save_local("titanic_data")
[ "langchain.indexes.VectorstoreIndexCreator", "langchain_community.document_loaders.CSVLoader" ]
[((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {'vectorstore_cls': 'FAISS'}), '(vectorstore_cls=FAISS)\n', (291, 314), False, 'from langchain.indexes import VectorstoreIndexCreator\n')]
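As an illustrative follow-up to the record above (not part of the dataset itself), a minimal sketch of reloading the saved index might look like the following; it assumes OpenAI embeddings matching whatever VectorstoreIndexCreator used when the index was built, and the query string is made up.

# Illustrative sketch only: reload the index saved above with
# `index.vectorstore.save_local("titanic_data")`.
# Assumption: the embedding model must match the one used at index-build time.
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
# Note: recent langchain_community releases may also require
# `allow_dangerous_deserialization=True` when loading the pickled docstore.
vectorstore = FAISS.load_local("titanic_data", embeddings)
for doc in vectorstore.similarity_search("Who survived?", k=4):
    print(doc.page_content)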
from typing import Any, Dict, List, Type, Union

from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
    KnowledgeTriple,
    get_entities,
    parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field

from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
    ENTITY_EXTRACTION_PROMPT,
    KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key


class ConversationKGMemory(BaseChatMemory):
    """Knowledge graph conversation memory.

    Integrates with external knowledge graph to store and retrieve
    information about knowledge triples in the conversation.
    """

    k: int = 2
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
    knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
    entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
    llm: BaseLanguageModel
    summary_message_cls: Type[BaseMessage] = SystemMessage
    """Number of previous utterances to include in the context."""
    memory_key: str = "history"  #: :meta private:

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Return history buffer."""
        entities = self._get_current_entities(inputs)

        summary_strings = []
        for entity in entities:
            knowledge = self.kg.get_entity_knowledge(entity)
            if knowledge:
                summary = f"On {entity}: {'. '.join(knowledge)}."
                summary_strings.append(summary)
        context: Union[str, List]
        if not summary_strings:
            context = [] if self.return_messages else ""
        elif self.return_messages:
            context = [
                self.summary_message_cls(content=text) for text in summary_strings
            ]
        else:
            context = "\n".join(summary_strings)

        return {self.memory_key: context}

    @property
    def memory_variables(self) -> List[str]:
        """Will always return list of memory variables.

        :meta private:
        """
        return [self.memory_key]

    def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
        """Get the input key for the prompt."""
        if self.input_key is None:
            return get_prompt_input_key(inputs, self.memory_variables)
        return self.input_key

    def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
        """Get the output key for the prompt."""
        if self.output_key is None:
            if len(outputs) != 1:
                raise ValueError(f"One output key expected, got {outputs.keys()}")
            return list(outputs.keys())[0]
        return self.output_key

    def get_current_entities(self, input_string: str) -> List[str]:
        chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=input_string,
        )
        return get_entities(output)

    def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
        """Get the current entities in the conversation."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        return self.get_current_entities(inputs[prompt_input_key])

    def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
        chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
        buffer_string = get_buffer_string(
            self.chat_memory.messages[-self.k * 2 :],
            human_prefix=self.human_prefix,
            ai_prefix=self.ai_prefix,
        )
        output = chain.predict(
            history=buffer_string,
            input=input_string,
            verbose=True,
        )
        knowledge = parse_triples(output)
        return knowledge

    def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
        """Get and update knowledge graph from the conversation history."""
        prompt_input_key = self._get_prompt_input_key(inputs)
        knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
        for triple in knowledge:
            self.kg.add_triple(triple)

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
        self._get_and_update_kg(inputs)

    def clear(self) -> None:
        """Clear memory contents."""
        super().clear()
        self.kg.clear()
[ "langchain_core.messages.get_buffer_string", "langchain.chains.llm.LLMChain", "langchain_community.graphs.networkx_graph.parse_triples", "langchain.memory.utils.get_prompt_input_key", "langchain_community.graphs.networkx_graph.get_entities", "langchain_core.pydantic_v1.Field" ]
[((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (3171, 3223), False, 'from langchain.chains.llm import LLMChain\n'), ((3248, 3369), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (3265, 3369), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((3537, 3557), 'langchain_community.graphs.networkx_graph.get_entities', 'get_entities', (['output'], {}), '(output)\n', (3549, 3557), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((3921, 3984), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.knowledge_extraction_prompt'}), '(llm=self.llm, prompt=self.knowledge_extraction_prompt)\n', (3929, 3984), False, 'from langchain.chains.llm import LLMChain\n'), ((4009, 4130), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.chat_memory.messages[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.chat_memory.messages[-self.k * 2:], human_prefix=\n self.human_prefix, ai_prefix=self.ai_prefix)\n', (4026, 4130), False, 'from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string\n'), ((4329, 4350), 'langchain_community.graphs.networkx_graph.parse_triples', 'parse_triples', (['output'], {}), '(output)\n', (4342, 4350), False, 'from langchain_community.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples\n'), ((2649, 2700), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (2669, 2700), False, 'from langchain.memory.utils import get_prompt_input_key\n')]
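For illustration only (not part of the dataset record), a minimal usage sketch of the ConversationKGMemory class above; the chat model choice and the example utterances are assumptions made up for demonstration.

# Hypothetical usage sketch for the ConversationKGMemory class shown above.
from langchain.memory import ConversationKGMemory
from langchain_openai import ChatOpenAI  # assumption: any BaseLanguageModel would do

memory = ConversationKGMemory(llm=ChatOpenAI(temperature=0))

# save_context() extracts knowledge triples from each exchange and stores them
# in the in-memory NetworkX graph.
memory.save_context(
    {"input": "Sam is my coworker and leads the data platform team."},
    {"output": "Good to know."},
)

# load_memory_variables() summarizes what the graph knows about the entities
# mentioned in the new input.
print(memory.load_memory_variables({"input": "What do you know about Sam?"}))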
""" **LLM** classes provide access to the large language model (**LLM**) APIs and services. **Class hierarchy:** .. code-block:: BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI **Main helpers:** .. code-block:: LLMResult, PromptValue, CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, CallbackManager, AsyncCallbackManager, AIMessage, BaseMessage """ # noqa: E501 import warnings from typing import Any, Callable, Dict, Type from langchain_core._api import LangChainDeprecationWarning from langchain_core.language_models.llms import BaseLLM from langchain.utils.interactive_env import is_interactive_env def _import_ai21() -> Any: from langchain_community.llms.ai21 import AI21 return AI21 def _import_aleph_alpha() -> Any: from langchain_community.llms.aleph_alpha import AlephAlpha return AlephAlpha def _import_amazon_api_gateway() -> Any: from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway return AmazonAPIGateway def _import_anthropic() -> Any: from langchain_community.llms.anthropic import Anthropic return Anthropic def _import_anyscale() -> Any: from langchain_community.llms.anyscale import Anyscale return Anyscale def _import_arcee() -> Any: from langchain_community.llms.arcee import Arcee return Arcee def _import_aviary() -> Any: from langchain_community.llms.aviary import Aviary return Aviary def _import_azureml_endpoint() -> Any: from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint return AzureMLOnlineEndpoint def _import_baidu_qianfan_endpoint() -> Any: from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint return QianfanLLMEndpoint def _import_bananadev() -> Any: from langchain_community.llms.bananadev import Banana return Banana def _import_baseten() -> Any: from langchain_community.llms.baseten import Baseten return Baseten def _import_beam() -> Any: from langchain_community.llms.beam import Beam return Beam def _import_bedrock() -> Any: from langchain_community.llms.bedrock import Bedrock return Bedrock def _import_bittensor() -> Any: from langchain_community.llms.bittensor import NIBittensorLLM return NIBittensorLLM def _import_cerebriumai() -> Any: from langchain_community.llms.cerebriumai import CerebriumAI return CerebriumAI def _import_chatglm() -> Any: from langchain_community.llms.chatglm import ChatGLM return ChatGLM def _import_clarifai() -> Any: from langchain_community.llms.clarifai import Clarifai return Clarifai def _import_cohere() -> Any: from langchain_community.llms.cohere import Cohere return Cohere def _import_ctransformers() -> Any: from langchain_community.llms.ctransformers import CTransformers return CTransformers def _import_ctranslate2() -> Any: from langchain_community.llms.ctranslate2 import CTranslate2 return CTranslate2 def _import_databricks() -> Any: from langchain_community.llms.databricks import Databricks return Databricks def _import_databricks_chat() -> Any: from langchain_community.chat_models.databricks import ChatDatabricks return ChatDatabricks def _import_deepinfra() -> Any: from langchain_community.llms.deepinfra import DeepInfra return DeepInfra def _import_deepsparse() -> Any: from langchain_community.llms.deepsparse import DeepSparse return DeepSparse def _import_edenai() -> Any: from langchain_community.llms.edenai import EdenAI return EdenAI def _import_fake() -> Any: from langchain_community.llms.fake import FakeListLLM return FakeListLLM def _import_fireworks() -> Any: from langchain_community.llms.fireworks import 
Fireworks return Fireworks def _import_forefrontai() -> Any: from langchain_community.llms.forefrontai import ForefrontAI return ForefrontAI def _import_gigachat() -> Any: from langchain_community.llms.gigachat import GigaChat return GigaChat def _import_google_palm() -> Any: from langchain_community.llms.google_palm import GooglePalm return GooglePalm def _import_gooseai() -> Any: from langchain_community.llms.gooseai import GooseAI return GooseAI def _import_gpt4all() -> Any: from langchain_community.llms.gpt4all import GPT4All return GPT4All def _import_gradient_ai() -> Any: from langchain_community.llms.gradient_ai import GradientLLM return GradientLLM def _import_huggingface_endpoint() -> Any: from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint return HuggingFaceEndpoint def _import_huggingface_hub() -> Any: from langchain_community.llms.huggingface_hub import HuggingFaceHub return HuggingFaceHub def _import_huggingface_pipeline() -> Any: from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline return HuggingFacePipeline def _import_huggingface_text_gen_inference() -> Any: from langchain_community.llms.huggingface_text_gen_inference import ( HuggingFaceTextGenInference, ) return HuggingFaceTextGenInference def _import_human() -> Any: from langchain_community.llms.human import HumanInputLLM return HumanInputLLM def _import_javelin_ai_gateway() -> Any: from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway def _import_koboldai() -> Any: from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM def _import_llamacpp() -> Any: from langchain_community.llms.llamacpp import LlamaCpp return LlamaCpp def _import_manifest() -> Any: from langchain_community.llms.manifest import ManifestWrapper return ManifestWrapper def _import_minimax() -> Any: from langchain_community.llms.minimax import Minimax return Minimax def _import_mlflow() -> Any: from langchain_community.llms.mlflow import Mlflow return Mlflow def _import_mlflow_chat() -> Any: from langchain_community.chat_models.mlflow import ChatMlflow return ChatMlflow def _import_mlflow_ai_gateway() -> Any: from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway return MlflowAIGateway def _import_modal() -> Any: from langchain_community.llms.modal import Modal return Modal def _import_mosaicml() -> Any: from langchain_community.llms.mosaicml import MosaicML return MosaicML def _import_nlpcloud() -> Any: from langchain_community.llms.nlpcloud import NLPCloud return NLPCloud def _import_octoai_endpoint() -> Any: from langchain_community.llms.octoai_endpoint import OctoAIEndpoint return OctoAIEndpoint def _import_ollama() -> Any: from langchain_community.llms.ollama import Ollama return Ollama def _import_opaqueprompts() -> Any: from langchain_community.llms.opaqueprompts import OpaquePrompts return OpaquePrompts def _import_azure_openai() -> Any: from langchain_community.llms.openai import AzureOpenAI return AzureOpenAI def _import_openai() -> Any: from langchain_community.llms.openai import OpenAI return OpenAI def _import_openai_chat() -> Any: from langchain_community.llms.openai import OpenAIChat return OpenAIChat def _import_openllm() -> Any: from langchain_community.llms.openllm import OpenLLM return OpenLLM def _import_openlm() -> Any: from langchain_community.llms.openlm import OpenLM return OpenLM def _import_pai_eas_endpoint() -> Any: from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint return 
PaiEasEndpoint def _import_petals() -> Any: from langchain_community.llms.petals import Petals return Petals def _import_pipelineai() -> Any: from langchain_community.llms.pipelineai import PipelineAI return PipelineAI def _import_predibase() -> Any: from langchain_community.llms.predibase import Predibase return Predibase def _import_predictionguard() -> Any: from langchain_community.llms.predictionguard import PredictionGuard return PredictionGuard def _import_promptlayer() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI return PromptLayerOpenAI def _import_promptlayer_chat() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat return PromptLayerOpenAIChat def _import_replicate() -> Any: from langchain_community.llms.replicate import Replicate return Replicate def _import_rwkv() -> Any: from langchain_community.llms.rwkv import RWKV return RWKV def _import_sagemaker_endpoint() -> Any: from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint return SagemakerEndpoint def _import_self_hosted() -> Any: from langchain_community.llms.self_hosted import SelfHostedPipeline return SelfHostedPipeline def _import_self_hosted_hugging_face() -> Any: from langchain_community.llms.self_hosted_hugging_face import ( SelfHostedHuggingFaceLLM, ) return SelfHostedHuggingFaceLLM def _import_stochasticai() -> Any: from langchain_community.llms.stochasticai import StochasticAI return StochasticAI def _import_symblai_nebula() -> Any: from langchain_community.llms.symblai_nebula import Nebula return Nebula def _import_textgen() -> Any: from langchain_community.llms.textgen import TextGen return TextGen def _import_titan_takeoff() -> Any: from langchain_community.llms.titan_takeoff import TitanTakeoff return TitanTakeoff def _import_titan_takeoff_pro() -> Any: from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro return TitanTakeoffPro def _import_together() -> Any: from langchain_community.llms.together import Together return Together def _import_tongyi() -> Any: from langchain_community.llms.tongyi import Tongyi return Tongyi def _import_vertex() -> Any: from langchain_community.llms.vertexai import VertexAI return VertexAI def _import_vertex_model_garden() -> Any: from langchain_community.llms.vertexai import VertexAIModelGarden return VertexAIModelGarden def _import_vllm() -> Any: from langchain_community.llms.vllm import VLLM return VLLM def _import_vllm_openai() -> Any: from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI def _import_watsonxllm() -> Any: from langchain_community.llms.watsonxllm import WatsonxLLM return WatsonxLLM def _import_writer() -> Any: from langchain_community.llms.writer import Writer return Writer def _import_xinference() -> Any: from langchain_community.llms.xinference import Xinference return Xinference def _import_yandex_gpt() -> Any: from langchain_community.llms.yandex import YandexGPT return YandexGPT def _import_volcengine_maas() -> Any: from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM return VolcEngineMaasLLM def __getattr__(name: str) -> Any: from langchain_community import llms # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing LLMs from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.llms import {name}`.\n\n" "To install langchain-community run `pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) if name == "type_to_cls_dict": # for backwards compatibility type_to_cls_dict: Dict[str, Type[BaseLLM]] = { k: v() for k, v in get_type_to_cls_dict().items() } return type_to_cls_dict else: return getattr(llms, name) __all__ = [ "AI21", "AlephAlpha", "AmazonAPIGateway", "Anthropic", "Anyscale", "Arcee", "Aviary", "AzureMLOnlineEndpoint", "AzureOpenAI", "Banana", "Baseten", "Beam", "Bedrock", "CTransformers", "CTranslate2", "CerebriumAI", "ChatGLM", "Clarifai", "Cohere", "Databricks", "DeepInfra", "DeepSparse", "EdenAI", "FakeListLLM", "Fireworks", "ForefrontAI", "GigaChat", "GPT4All", "GooglePalm", "GooseAI", "GradientLLM", "HuggingFaceEndpoint", "HuggingFaceHub", "HuggingFacePipeline", "HuggingFaceTextGenInference", "HumanInputLLM", "KoboldApiLLM", "LlamaCpp", "TextGen", "ManifestWrapper", "Minimax", "MlflowAIGateway", "Modal", "MosaicML", "Nebula", "NIBittensorLLM", "NLPCloud", "Ollama", "OpenAI", "OpenAIChat", "OpenLLM", "OpenLM", "PaiEasEndpoint", "Petals", "PipelineAI", "Predibase", "PredictionGuard", "PromptLayerOpenAI", "PromptLayerOpenAIChat", "OpaquePrompts", "RWKV", "Replicate", "SagemakerEndpoint", "SelfHostedHuggingFaceLLM", "SelfHostedPipeline", "StochasticAI", "TitanTakeoff", "TitanTakeoffPro", "Tongyi", "VertexAI", "VertexAIModelGarden", "VLLM", "VLLMOpenAI", "WatsonxLLM", "Writer", "OctoAIEndpoint", "Xinference", "JavelinAIGateway", "QianfanLLMEndpoint", "YandexGPT", "VolcEngineMaasLLM", ] def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]: return { "ai21": _import_ai21, "aleph_alpha": _import_aleph_alpha, "amazon_api_gateway": _import_amazon_api_gateway, "amazon_bedrock": _import_bedrock, "anthropic": _import_anthropic, "anyscale": _import_anyscale, "arcee": _import_arcee, "aviary": _import_aviary, "azure": _import_azure_openai, "azureml_endpoint": _import_azureml_endpoint, "bananadev": _import_bananadev, "baseten": _import_baseten, "beam": _import_beam, "cerebriumai": _import_cerebriumai, "chat_glm": _import_chatglm, "clarifai": _import_clarifai, "cohere": _import_cohere, "ctransformers": _import_ctransformers, "ctranslate2": _import_ctranslate2, "databricks": _import_databricks, "databricks-chat": _import_databricks_chat, "deepinfra": _import_deepinfra, "deepsparse": _import_deepsparse, "edenai": _import_edenai, "fake-list": _import_fake, "forefrontai": _import_forefrontai, "giga-chat-model": _import_gigachat, "google_palm": _import_google_palm, "gooseai": _import_gooseai, "gradient": _import_gradient_ai, "gpt4all": _import_gpt4all, "huggingface_endpoint": _import_huggingface_endpoint, "huggingface_hub": _import_huggingface_hub, "huggingface_pipeline": _import_huggingface_pipeline, "huggingface_textgen_inference": _import_huggingface_text_gen_inference, "human-input": _import_human, "koboldai": _import_koboldai, "llamacpp": _import_llamacpp, "textgen": _import_textgen, "minimax": _import_minimax, "mlflow": _import_mlflow, "mlflow-chat": _import_mlflow_chat, "mlflow-ai-gateway": _import_mlflow_ai_gateway, "modal": _import_modal, "mosaic": _import_mosaicml, "nebula": _import_symblai_nebula, "nibittensor": _import_bittensor, "nlpcloud": _import_nlpcloud, "ollama": _import_ollama, "openai": _import_openai, "openlm": _import_openlm, "pai_eas_endpoint": _import_pai_eas_endpoint, "petals": _import_petals, "pipelineai": _import_pipelineai, 
"predibase": _import_predibase, "opaqueprompts": _import_opaqueprompts, "replicate": _import_replicate, "rwkv": _import_rwkv, "sagemaker_endpoint": _import_sagemaker_endpoint, "self_hosted": _import_self_hosted, "self_hosted_hugging_face": _import_self_hosted_hugging_face, "stochasticai": _import_stochasticai, "together": _import_together, "tongyi": _import_tongyi, "titan_takeoff": _import_titan_takeoff, "titan_takeoff_pro": _import_titan_takeoff_pro, "vertexai": _import_vertex, "vertexai_model_garden": _import_vertex_model_garden, "openllm": _import_openllm, "openllm_client": _import_openllm, "vllm": _import_vllm, "vllm_openai": _import_vllm_openai, "watsonxllm": _import_watsonxllm, "writer": _import_writer, "xinference": _import_xinference, "javelin-ai-gateway": _import_javelin_ai_gateway, "qianfan_endpoint": _import_baidu_qianfan_endpoint, "yandex_gpt": _import_yandex_gpt, "VolcEngineMaasLLM": _import_volcengine_maas, }
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')]
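A minimal usage sketch to accompany the record above (illustrative, not part of the dataset): the module lazily resolves provider keys to classes via get_type_to_cls_dict, and its deprecation warning steers callers toward importing LLM classes from langchain_community directly. The "fake-list" key and the canned response are assumptions chosen so the sketch runs without any API key, assuming langchain-community is installed.

from langchain.llms import get_type_to_cls_dict

# "fake-list" maps to _import_fake in the registry above, which returns FakeListLLM
# from langchain_community; this class needs no credentials.
FakeListLLM = get_type_to_cls_dict()["fake-list"]()
llm = FakeListLLM(responses=["hello from the fake LLM"])
print(llm.invoke("any prompt"))  # replies with the canned response

# The deprecation warning in __getattr__ recommends the direct community import instead:
# from langchain_community.llms import FakeListLLM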
""" **LLM** classes provide access to the large language model (**LLM**) APIs and services. **Class hierarchy:** .. code-block:: BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI **Main helpers:** .. code-block:: LLMResult, PromptValue, CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, CallbackManager, AsyncCallbackManager, AIMessage, BaseMessage """ # noqa: E501 import warnings from typing import Any, Callable, Dict, Type from langchain_core._api import LangChainDeprecationWarning from langchain_core.language_models.llms import BaseLLM from langchain.utils.interactive_env import is_interactive_env def _import_ai21() -> Any: from langchain_community.llms.ai21 import AI21 return AI21 def _import_aleph_alpha() -> Any: from langchain_community.llms.aleph_alpha import AlephAlpha return AlephAlpha def _import_amazon_api_gateway() -> Any: from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway return AmazonAPIGateway def _import_anthropic() -> Any: from langchain_community.llms.anthropic import Anthropic return Anthropic def _import_anyscale() -> Any: from langchain_community.llms.anyscale import Anyscale return Anyscale def _import_arcee() -> Any: from langchain_community.llms.arcee import Arcee return Arcee def _import_aviary() -> Any: from langchain_community.llms.aviary import Aviary return Aviary def _import_azureml_endpoint() -> Any: from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint return AzureMLOnlineEndpoint def _import_baidu_qianfan_endpoint() -> Any: from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint return QianfanLLMEndpoint def _import_bananadev() -> Any: from langchain_community.llms.bananadev import Banana return Banana def _import_baseten() -> Any: from langchain_community.llms.baseten import Baseten return Baseten def _import_beam() -> Any: from langchain_community.llms.beam import Beam return Beam def _import_bedrock() -> Any: from langchain_community.llms.bedrock import Bedrock return Bedrock def _import_bittensor() -> Any: from langchain_community.llms.bittensor import NIBittensorLLM return NIBittensorLLM def _import_cerebriumai() -> Any: from langchain_community.llms.cerebriumai import CerebriumAI return CerebriumAI def _import_chatglm() -> Any: from langchain_community.llms.chatglm import ChatGLM return ChatGLM def _import_clarifai() -> Any: from langchain_community.llms.clarifai import Clarifai return Clarifai def _import_cohere() -> Any: from langchain_community.llms.cohere import Cohere return Cohere def _import_ctransformers() -> Any: from langchain_community.llms.ctransformers import CTransformers return CTransformers def _import_ctranslate2() -> Any: from langchain_community.llms.ctranslate2 import CTranslate2 return CTranslate2 def _import_databricks() -> Any: from langchain_community.llms.databricks import Databricks return Databricks def _import_databricks_chat() -> Any: from langchain_community.chat_models.databricks import ChatDatabricks return ChatDatabricks def _import_deepinfra() -> Any: from langchain_community.llms.deepinfra import DeepInfra return DeepInfra def _import_deepsparse() -> Any: from langchain_community.llms.deepsparse import DeepSparse return DeepSparse def _import_edenai() -> Any: from langchain_community.llms.edenai import EdenAI return EdenAI def _import_fake() -> Any: from langchain_community.llms.fake import FakeListLLM return FakeListLLM def _import_fireworks() -> Any: from langchain_community.llms.fireworks import 
Fireworks return Fireworks def _import_forefrontai() -> Any: from langchain_community.llms.forefrontai import ForefrontAI return ForefrontAI def _import_gigachat() -> Any: from langchain_community.llms.gigachat import GigaChat return GigaChat def _import_google_palm() -> Any: from langchain_community.llms.google_palm import GooglePalm return GooglePalm def _import_gooseai() -> Any: from langchain_community.llms.gooseai import GooseAI return GooseAI def _import_gpt4all() -> Any: from langchain_community.llms.gpt4all import GPT4All return GPT4All def _import_gradient_ai() -> Any: from langchain_community.llms.gradient_ai import GradientLLM return GradientLLM def _import_huggingface_endpoint() -> Any: from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint return HuggingFaceEndpoint def _import_huggingface_hub() -> Any: from langchain_community.llms.huggingface_hub import HuggingFaceHub return HuggingFaceHub def _import_huggingface_pipeline() -> Any: from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline return HuggingFacePipeline def _import_huggingface_text_gen_inference() -> Any: from langchain_community.llms.huggingface_text_gen_inference import ( HuggingFaceTextGenInference, ) return HuggingFaceTextGenInference def _import_human() -> Any: from langchain_community.llms.human import HumanInputLLM return HumanInputLLM def _import_javelin_ai_gateway() -> Any: from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway def _import_koboldai() -> Any: from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM def _import_llamacpp() -> Any: from langchain_community.llms.llamacpp import LlamaCpp return LlamaCpp def _import_manifest() -> Any: from langchain_community.llms.manifest import ManifestWrapper return ManifestWrapper def _import_minimax() -> Any: from langchain_community.llms.minimax import Minimax return Minimax def _import_mlflow() -> Any: from langchain_community.llms.mlflow import Mlflow return Mlflow def _import_mlflow_chat() -> Any: from langchain_community.chat_models.mlflow import ChatMlflow return ChatMlflow def _import_mlflow_ai_gateway() -> Any: from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway return MlflowAIGateway def _import_modal() -> Any: from langchain_community.llms.modal import Modal return Modal def _import_mosaicml() -> Any: from langchain_community.llms.mosaicml import MosaicML return MosaicML def _import_nlpcloud() -> Any: from langchain_community.llms.nlpcloud import NLPCloud return NLPCloud def _import_octoai_endpoint() -> Any: from langchain_community.llms.octoai_endpoint import OctoAIEndpoint return OctoAIEndpoint def _import_ollama() -> Any: from langchain_community.llms.ollama import Ollama return Ollama def _import_opaqueprompts() -> Any: from langchain_community.llms.opaqueprompts import OpaquePrompts return OpaquePrompts def _import_azure_openai() -> Any: from langchain_community.llms.openai import AzureOpenAI return AzureOpenAI def _import_openai() -> Any: from langchain_community.llms.openai import OpenAI return OpenAI def _import_openai_chat() -> Any: from langchain_community.llms.openai import OpenAIChat return OpenAIChat def _import_openllm() -> Any: from langchain_community.llms.openllm import OpenLLM return OpenLLM def _import_openlm() -> Any: from langchain_community.llms.openlm import OpenLM return OpenLM def _import_pai_eas_endpoint() -> Any: from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint return 
PaiEasEndpoint def _import_petals() -> Any: from langchain_community.llms.petals import Petals return Petals def _import_pipelineai() -> Any: from langchain_community.llms.pipelineai import PipelineAI return PipelineAI def _import_predibase() -> Any: from langchain_community.llms.predibase import Predibase return Predibase def _import_predictionguard() -> Any: from langchain_community.llms.predictionguard import PredictionGuard return PredictionGuard def _import_promptlayer() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI return PromptLayerOpenAI def _import_promptlayer_chat() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat return PromptLayerOpenAIChat def _import_replicate() -> Any: from langchain_community.llms.replicate import Replicate return Replicate def _import_rwkv() -> Any: from langchain_community.llms.rwkv import RWKV return RWKV def _import_sagemaker_endpoint() -> Any: from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint return SagemakerEndpoint def _import_self_hosted() -> Any: from langchain_community.llms.self_hosted import SelfHostedPipeline return SelfHostedPipeline def _import_self_hosted_hugging_face() -> Any: from langchain_community.llms.self_hosted_hugging_face import ( SelfHostedHuggingFaceLLM, ) return SelfHostedHuggingFaceLLM def _import_stochasticai() -> Any: from langchain_community.llms.stochasticai import StochasticAI return StochasticAI def _import_symblai_nebula() -> Any: from langchain_community.llms.symblai_nebula import Nebula return Nebula def _import_textgen() -> Any: from langchain_community.llms.textgen import TextGen return TextGen def _import_titan_takeoff() -> Any: from langchain_community.llms.titan_takeoff import TitanTakeoff return TitanTakeoff def _import_titan_takeoff_pro() -> Any: from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro return TitanTakeoffPro def _import_together() -> Any: from langchain_community.llms.together import Together return Together def _import_tongyi() -> Any: from langchain_community.llms.tongyi import Tongyi return Tongyi def _import_vertex() -> Any: from langchain_community.llms.vertexai import VertexAI return VertexAI def _import_vertex_model_garden() -> Any: from langchain_community.llms.vertexai import VertexAIModelGarden return VertexAIModelGarden def _import_vllm() -> Any: from langchain_community.llms.vllm import VLLM return VLLM def _import_vllm_openai() -> Any: from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI def _import_watsonxllm() -> Any: from langchain_community.llms.watsonxllm import WatsonxLLM return WatsonxLLM def _import_writer() -> Any: from langchain_community.llms.writer import Writer return Writer def _import_xinference() -> Any: from langchain_community.llms.xinference import Xinference return Xinference def _import_yandex_gpt() -> Any: from langchain_community.llms.yandex import YandexGPT return YandexGPT def _import_volcengine_maas() -> Any: from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM return VolcEngineMaasLLM def __getattr__(name: str) -> Any: from langchain_community import llms # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing LLMs from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.llms import {name}`.\n\n" "To install langchain-community run `pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) if name == "type_to_cls_dict": # for backwards compatibility type_to_cls_dict: Dict[str, Type[BaseLLM]] = { k: v() for k, v in get_type_to_cls_dict().items() } return type_to_cls_dict else: return getattr(llms, name) __all__ = [ "AI21", "AlephAlpha", "AmazonAPIGateway", "Anthropic", "Anyscale", "Arcee", "Aviary", "AzureMLOnlineEndpoint", "AzureOpenAI", "Banana", "Baseten", "Beam", "Bedrock", "CTransformers", "CTranslate2", "CerebriumAI", "ChatGLM", "Clarifai", "Cohere", "Databricks", "DeepInfra", "DeepSparse", "EdenAI", "FakeListLLM", "Fireworks", "ForefrontAI", "GigaChat", "GPT4All", "GooglePalm", "GooseAI", "GradientLLM", "HuggingFaceEndpoint", "HuggingFaceHub", "HuggingFacePipeline", "HuggingFaceTextGenInference", "HumanInputLLM", "KoboldApiLLM", "LlamaCpp", "TextGen", "ManifestWrapper", "Minimax", "MlflowAIGateway", "Modal", "MosaicML", "Nebula", "NIBittensorLLM", "NLPCloud", "Ollama", "OpenAI", "OpenAIChat", "OpenLLM", "OpenLM", "PaiEasEndpoint", "Petals", "PipelineAI", "Predibase", "PredictionGuard", "PromptLayerOpenAI", "PromptLayerOpenAIChat", "OpaquePrompts", "RWKV", "Replicate", "SagemakerEndpoint", "SelfHostedHuggingFaceLLM", "SelfHostedPipeline", "StochasticAI", "TitanTakeoff", "TitanTakeoffPro", "Tongyi", "VertexAI", "VertexAIModelGarden", "VLLM", "VLLMOpenAI", "WatsonxLLM", "Writer", "OctoAIEndpoint", "Xinference", "JavelinAIGateway", "QianfanLLMEndpoint", "YandexGPT", "VolcEngineMaasLLM", ] def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]: return { "ai21": _import_ai21, "aleph_alpha": _import_aleph_alpha, "amazon_api_gateway": _import_amazon_api_gateway, "amazon_bedrock": _import_bedrock, "anthropic": _import_anthropic, "anyscale": _import_anyscale, "arcee": _import_arcee, "aviary": _import_aviary, "azure": _import_azure_openai, "azureml_endpoint": _import_azureml_endpoint, "bananadev": _import_bananadev, "baseten": _import_baseten, "beam": _import_beam, "cerebriumai": _import_cerebriumai, "chat_glm": _import_chatglm, "clarifai": _import_clarifai, "cohere": _import_cohere, "ctransformers": _import_ctransformers, "ctranslate2": _import_ctranslate2, "databricks": _import_databricks, "databricks-chat": _import_databricks_chat, "deepinfra": _import_deepinfra, "deepsparse": _import_deepsparse, "edenai": _import_edenai, "fake-list": _import_fake, "forefrontai": _import_forefrontai, "giga-chat-model": _import_gigachat, "google_palm": _import_google_palm, "gooseai": _import_gooseai, "gradient": _import_gradient_ai, "gpt4all": _import_gpt4all, "huggingface_endpoint": _import_huggingface_endpoint, "huggingface_hub": _import_huggingface_hub, "huggingface_pipeline": _import_huggingface_pipeline, "huggingface_textgen_inference": _import_huggingface_text_gen_inference, "human-input": _import_human, "koboldai": _import_koboldai, "llamacpp": _import_llamacpp, "textgen": _import_textgen, "minimax": _import_minimax, "mlflow": _import_mlflow, "mlflow-chat": _import_mlflow_chat, "mlflow-ai-gateway": _import_mlflow_ai_gateway, "modal": _import_modal, "mosaic": _import_mosaicml, "nebula": _import_symblai_nebula, "nibittensor": _import_bittensor, "nlpcloud": _import_nlpcloud, "ollama": _import_ollama, "openai": _import_openai, "openlm": _import_openlm, "pai_eas_endpoint": _import_pai_eas_endpoint, "petals": _import_petals, "pipelineai": _import_pipelineai, 
"predibase": _import_predibase, "opaqueprompts": _import_opaqueprompts, "replicate": _import_replicate, "rwkv": _import_rwkv, "sagemaker_endpoint": _import_sagemaker_endpoint, "self_hosted": _import_self_hosted, "self_hosted_hugging_face": _import_self_hosted_hugging_face, "stochasticai": _import_stochasticai, "together": _import_together, "tongyi": _import_tongyi, "titan_takeoff": _import_titan_takeoff, "titan_takeoff_pro": _import_titan_takeoff_pro, "vertexai": _import_vertex, "vertexai_model_garden": _import_vertex_model_garden, "openllm": _import_openllm, "openllm_client": _import_openllm, "vllm": _import_vllm, "vllm_openai": _import_vllm_openai, "watsonxllm": _import_watsonxllm, "writer": _import_writer, "xinference": _import_xinference, "javelin-ai-gateway": _import_javelin_ai_gateway, "qianfan_endpoint": _import_baidu_qianfan_endpoint, "yandex_gpt": _import_yandex_gpt, "VolcEngineMaasLLM": _import_volcengine_maas, }
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')]
""" **LLM** classes provide access to the large language model (**LLM**) APIs and services. **Class hierarchy:** .. code-block:: BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI **Main helpers:** .. code-block:: LLMResult, PromptValue, CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, CallbackManager, AsyncCallbackManager, AIMessage, BaseMessage """ # noqa: E501 import warnings from typing import Any, Callable, Dict, Type from langchain_core._api import LangChainDeprecationWarning from langchain_core.language_models.llms import BaseLLM from langchain.utils.interactive_env import is_interactive_env def _import_ai21() -> Any: from langchain_community.llms.ai21 import AI21 return AI21 def _import_aleph_alpha() -> Any: from langchain_community.llms.aleph_alpha import AlephAlpha return AlephAlpha def _import_amazon_api_gateway() -> Any: from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway return AmazonAPIGateway def _import_anthropic() -> Any: from langchain_community.llms.anthropic import Anthropic return Anthropic def _import_anyscale() -> Any: from langchain_community.llms.anyscale import Anyscale return Anyscale def _import_arcee() -> Any: from langchain_community.llms.arcee import Arcee return Arcee def _import_aviary() -> Any: from langchain_community.llms.aviary import Aviary return Aviary def _import_azureml_endpoint() -> Any: from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint return AzureMLOnlineEndpoint def _import_baidu_qianfan_endpoint() -> Any: from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint return QianfanLLMEndpoint def _import_bananadev() -> Any: from langchain_community.llms.bananadev import Banana return Banana def _import_baseten() -> Any: from langchain_community.llms.baseten import Baseten return Baseten def _import_beam() -> Any: from langchain_community.llms.beam import Beam return Beam def _import_bedrock() -> Any: from langchain_community.llms.bedrock import Bedrock return Bedrock def _import_bittensor() -> Any: from langchain_community.llms.bittensor import NIBittensorLLM return NIBittensorLLM def _import_cerebriumai() -> Any: from langchain_community.llms.cerebriumai import CerebriumAI return CerebriumAI def _import_chatglm() -> Any: from langchain_community.llms.chatglm import ChatGLM return ChatGLM def _import_clarifai() -> Any: from langchain_community.llms.clarifai import Clarifai return Clarifai def _import_cohere() -> Any: from langchain_community.llms.cohere import Cohere return Cohere def _import_ctransformers() -> Any: from langchain_community.llms.ctransformers import CTransformers return CTransformers def _import_ctranslate2() -> Any: from langchain_community.llms.ctranslate2 import CTranslate2 return CTranslate2 def _import_databricks() -> Any: from langchain_community.llms.databricks import Databricks return Databricks def _import_databricks_chat() -> Any: from langchain_community.chat_models.databricks import ChatDatabricks return ChatDatabricks def _import_deepinfra() -> Any: from langchain_community.llms.deepinfra import DeepInfra return DeepInfra def _import_deepsparse() -> Any: from langchain_community.llms.deepsparse import DeepSparse return DeepSparse def _import_edenai() -> Any: from langchain_community.llms.edenai import EdenAI return EdenAI def _import_fake() -> Any: from langchain_community.llms.fake import FakeListLLM return FakeListLLM def _import_fireworks() -> Any: from langchain_community.llms.fireworks import 
Fireworks return Fireworks def _import_forefrontai() -> Any: from langchain_community.llms.forefrontai import ForefrontAI return ForefrontAI def _import_gigachat() -> Any: from langchain_community.llms.gigachat import GigaChat return GigaChat def _import_google_palm() -> Any: from langchain_community.llms.google_palm import GooglePalm return GooglePalm def _import_gooseai() -> Any: from langchain_community.llms.gooseai import GooseAI return GooseAI def _import_gpt4all() -> Any: from langchain_community.llms.gpt4all import GPT4All return GPT4All def _import_gradient_ai() -> Any: from langchain_community.llms.gradient_ai import GradientLLM return GradientLLM def _import_huggingface_endpoint() -> Any: from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint return HuggingFaceEndpoint def _import_huggingface_hub() -> Any: from langchain_community.llms.huggingface_hub import HuggingFaceHub return HuggingFaceHub def _import_huggingface_pipeline() -> Any: from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline return HuggingFacePipeline def _import_huggingface_text_gen_inference() -> Any: from langchain_community.llms.huggingface_text_gen_inference import ( HuggingFaceTextGenInference, ) return HuggingFaceTextGenInference def _import_human() -> Any: from langchain_community.llms.human import HumanInputLLM return HumanInputLLM def _import_javelin_ai_gateway() -> Any: from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway def _import_koboldai() -> Any: from langchain_community.llms.koboldai import KoboldApiLLM return KoboldApiLLM def _import_llamacpp() -> Any: from langchain_community.llms.llamacpp import LlamaCpp return LlamaCpp def _import_manifest() -> Any: from langchain_community.llms.manifest import ManifestWrapper return ManifestWrapper def _import_minimax() -> Any: from langchain_community.llms.minimax import Minimax return Minimax def _import_mlflow() -> Any: from langchain_community.llms.mlflow import Mlflow return Mlflow def _import_mlflow_chat() -> Any: from langchain_community.chat_models.mlflow import ChatMlflow return ChatMlflow def _import_mlflow_ai_gateway() -> Any: from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway return MlflowAIGateway def _import_modal() -> Any: from langchain_community.llms.modal import Modal return Modal def _import_mosaicml() -> Any: from langchain_community.llms.mosaicml import MosaicML return MosaicML def _import_nlpcloud() -> Any: from langchain_community.llms.nlpcloud import NLPCloud return NLPCloud def _import_octoai_endpoint() -> Any: from langchain_community.llms.octoai_endpoint import OctoAIEndpoint return OctoAIEndpoint def _import_ollama() -> Any: from langchain_community.llms.ollama import Ollama return Ollama def _import_opaqueprompts() -> Any: from langchain_community.llms.opaqueprompts import OpaquePrompts return OpaquePrompts def _import_azure_openai() -> Any: from langchain_community.llms.openai import AzureOpenAI return AzureOpenAI def _import_openai() -> Any: from langchain_community.llms.openai import OpenAI return OpenAI def _import_openai_chat() -> Any: from langchain_community.llms.openai import OpenAIChat return OpenAIChat def _import_openllm() -> Any: from langchain_community.llms.openllm import OpenLLM return OpenLLM def _import_openlm() -> Any: from langchain_community.llms.openlm import OpenLM return OpenLM def _import_pai_eas_endpoint() -> Any: from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint return 
PaiEasEndpoint def _import_petals() -> Any: from langchain_community.llms.petals import Petals return Petals def _import_pipelineai() -> Any: from langchain_community.llms.pipelineai import PipelineAI return PipelineAI def _import_predibase() -> Any: from langchain_community.llms.predibase import Predibase return Predibase def _import_predictionguard() -> Any: from langchain_community.llms.predictionguard import PredictionGuard return PredictionGuard def _import_promptlayer() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI return PromptLayerOpenAI def _import_promptlayer_chat() -> Any: from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat return PromptLayerOpenAIChat def _import_replicate() -> Any: from langchain_community.llms.replicate import Replicate return Replicate def _import_rwkv() -> Any: from langchain_community.llms.rwkv import RWKV return RWKV def _import_sagemaker_endpoint() -> Any: from langchain_community.llms.sagemaker_endpoint import SagemakerEndpoint return SagemakerEndpoint def _import_self_hosted() -> Any: from langchain_community.llms.self_hosted import SelfHostedPipeline return SelfHostedPipeline def _import_self_hosted_hugging_face() -> Any: from langchain_community.llms.self_hosted_hugging_face import ( SelfHostedHuggingFaceLLM, ) return SelfHostedHuggingFaceLLM def _import_stochasticai() -> Any: from langchain_community.llms.stochasticai import StochasticAI return StochasticAI def _import_symblai_nebula() -> Any: from langchain_community.llms.symblai_nebula import Nebula return Nebula def _import_textgen() -> Any: from langchain_community.llms.textgen import TextGen return TextGen def _import_titan_takeoff() -> Any: from langchain_community.llms.titan_takeoff import TitanTakeoff return TitanTakeoff def _import_titan_takeoff_pro() -> Any: from langchain_community.llms.titan_takeoff_pro import TitanTakeoffPro return TitanTakeoffPro def _import_together() -> Any: from langchain_community.llms.together import Together return Together def _import_tongyi() -> Any: from langchain_community.llms.tongyi import Tongyi return Tongyi def _import_vertex() -> Any: from langchain_community.llms.vertexai import VertexAI return VertexAI def _import_vertex_model_garden() -> Any: from langchain_community.llms.vertexai import VertexAIModelGarden return VertexAIModelGarden def _import_vllm() -> Any: from langchain_community.llms.vllm import VLLM return VLLM def _import_vllm_openai() -> Any: from langchain_community.llms.vllm import VLLMOpenAI return VLLMOpenAI def _import_watsonxllm() -> Any: from langchain_community.llms.watsonxllm import WatsonxLLM return WatsonxLLM def _import_writer() -> Any: from langchain_community.llms.writer import Writer return Writer def _import_xinference() -> Any: from langchain_community.llms.xinference import Xinference return Xinference def _import_yandex_gpt() -> Any: from langchain_community.llms.yandex import YandexGPT return YandexGPT def _import_volcengine_maas() -> Any: from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM return VolcEngineMaasLLM def __getattr__(name: str) -> Any: from langchain_community import llms # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing LLMs from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.llms import {name}`.\n\n" "To install langchain-community run `pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) if name == "type_to_cls_dict": # for backwards compatibility type_to_cls_dict: Dict[str, Type[BaseLLM]] = { k: v() for k, v in get_type_to_cls_dict().items() } return type_to_cls_dict else: return getattr(llms, name) __all__ = [ "AI21", "AlephAlpha", "AmazonAPIGateway", "Anthropic", "Anyscale", "Arcee", "Aviary", "AzureMLOnlineEndpoint", "AzureOpenAI", "Banana", "Baseten", "Beam", "Bedrock", "CTransformers", "CTranslate2", "CerebriumAI", "ChatGLM", "Clarifai", "Cohere", "Databricks", "DeepInfra", "DeepSparse", "EdenAI", "FakeListLLM", "Fireworks", "ForefrontAI", "GigaChat", "GPT4All", "GooglePalm", "GooseAI", "GradientLLM", "HuggingFaceEndpoint", "HuggingFaceHub", "HuggingFacePipeline", "HuggingFaceTextGenInference", "HumanInputLLM", "KoboldApiLLM", "LlamaCpp", "TextGen", "ManifestWrapper", "Minimax", "MlflowAIGateway", "Modal", "MosaicML", "Nebula", "NIBittensorLLM", "NLPCloud", "Ollama", "OpenAI", "OpenAIChat", "OpenLLM", "OpenLM", "PaiEasEndpoint", "Petals", "PipelineAI", "Predibase", "PredictionGuard", "PromptLayerOpenAI", "PromptLayerOpenAIChat", "OpaquePrompts", "RWKV", "Replicate", "SagemakerEndpoint", "SelfHostedHuggingFaceLLM", "SelfHostedPipeline", "StochasticAI", "TitanTakeoff", "TitanTakeoffPro", "Tongyi", "VertexAI", "VertexAIModelGarden", "VLLM", "VLLMOpenAI", "WatsonxLLM", "Writer", "OctoAIEndpoint", "Xinference", "JavelinAIGateway", "QianfanLLMEndpoint", "YandexGPT", "VolcEngineMaasLLM", ] def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]: return { "ai21": _import_ai21, "aleph_alpha": _import_aleph_alpha, "amazon_api_gateway": _import_amazon_api_gateway, "amazon_bedrock": _import_bedrock, "anthropic": _import_anthropic, "anyscale": _import_anyscale, "arcee": _import_arcee, "aviary": _import_aviary, "azure": _import_azure_openai, "azureml_endpoint": _import_azureml_endpoint, "bananadev": _import_bananadev, "baseten": _import_baseten, "beam": _import_beam, "cerebriumai": _import_cerebriumai, "chat_glm": _import_chatglm, "clarifai": _import_clarifai, "cohere": _import_cohere, "ctransformers": _import_ctransformers, "ctranslate2": _import_ctranslate2, "databricks": _import_databricks, "databricks-chat": _import_databricks_chat, "deepinfra": _import_deepinfra, "deepsparse": _import_deepsparse, "edenai": _import_edenai, "fake-list": _import_fake, "forefrontai": _import_forefrontai, "giga-chat-model": _import_gigachat, "google_palm": _import_google_palm, "gooseai": _import_gooseai, "gradient": _import_gradient_ai, "gpt4all": _import_gpt4all, "huggingface_endpoint": _import_huggingface_endpoint, "huggingface_hub": _import_huggingface_hub, "huggingface_pipeline": _import_huggingface_pipeline, "huggingface_textgen_inference": _import_huggingface_text_gen_inference, "human-input": _import_human, "koboldai": _import_koboldai, "llamacpp": _import_llamacpp, "textgen": _import_textgen, "minimax": _import_minimax, "mlflow": _import_mlflow, "mlflow-chat": _import_mlflow_chat, "mlflow-ai-gateway": _import_mlflow_ai_gateway, "modal": _import_modal, "mosaic": _import_mosaicml, "nebula": _import_symblai_nebula, "nibittensor": _import_bittensor, "nlpcloud": _import_nlpcloud, "ollama": _import_ollama, "openai": _import_openai, "openlm": _import_openlm, "pai_eas_endpoint": _import_pai_eas_endpoint, "petals": _import_petals, "pipelineai": _import_pipelineai, 
"predibase": _import_predibase, "opaqueprompts": _import_opaqueprompts, "replicate": _import_replicate, "rwkv": _import_rwkv, "sagemaker_endpoint": _import_sagemaker_endpoint, "self_hosted": _import_self_hosted, "self_hosted_hugging_face": _import_self_hosted_hugging_face, "stochasticai": _import_stochasticai, "together": _import_together, "tongyi": _import_tongyi, "titan_takeoff": _import_titan_takeoff, "titan_takeoff_pro": _import_titan_takeoff_pro, "vertexai": _import_vertex, "vertexai_model_garden": _import_vertex_model_garden, "openllm": _import_openllm, "openllm_client": _import_openllm, "vllm": _import_vllm, "vllm_openai": _import_vllm_openai, "watsonxllm": _import_watsonxllm, "writer": _import_writer, "xinference": _import_xinference, "javelin-ai-gateway": _import_javelin_ai_gateway, "qianfan_endpoint": _import_baidu_qianfan_endpoint, "yandex_gpt": _import_yandex_gpt, "VolcEngineMaasLLM": _import_volcengine_maas, }
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing LLMs from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.llms import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (11381, 11729), False, 'import warnings\n')]
import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from langchain_community.utilities.redis import get_client from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key logger = logging.getLogger(__name__) class BaseEntityStore(BaseModel, ABC): """Abstract base class for Entity store.""" @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass class InMemoryEntityStore(BaseEntityStore): """In-memory Entity store.""" store: Dict[str, Optional[str]] = {} def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default) def set(self, key: str, value: Optional[str]) -> None: self.store[key] = value def delete(self, key: str) -> None: del self.store[key] def exists(self, key: str) -> bool: return key in self.store def clear(self) -> None: return self.store.clear() class UpstashRedisEntityStore(BaseEntityStore): """Upstash Redis backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ def __init__( self, session_id: str = "default", url: str = "", token: str = "", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: from upstash_redis import Redis except ImportError: raise ImportError( "Could not import upstash_redis python package. " "Please install it with `pip install upstash_redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = Redis(url=url, token=token) except Exception: logger.error("Upstash Redis instance could not be initiated.") self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: def scan_and_delete(cursor: int) -> int: cursor, keys_to_delete = self.redis_client.scan( cursor, f"{self.full_key_prefix}:*" ) self.redis_client.delete(*keys_to_delete) return cursor cursor = scan_and_delete(0) while cursor != 0: scan_and_delete(cursor) class RedisEntityStore(BaseEntityStore): """Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ redis_client: Any session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 recall_ttl: Optional[int] = 60 * 60 * 24 * 3 def __init__( self, session_id: str = "default", url: str = "redis://localhost:6379/0", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = get_client(redis_url=url, decode_responses=True) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: # iterate a list in batches of size batch_size def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: iterator = iter(iterable) while batch := list(islice(iterator, batch_size)): yield batch for keybatch in batched( self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500 ): self.redis_client.delete(*keybatch) class SQLiteEntityStore(BaseEntityStore): """SQLite-backed Entity store""" session_id: str = "default" table_name: str = "memory_store" conn: Any = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def __init__( self, session_id: str = "default", db_file: str = "entities.db", table_name: str = "memory_store", *args: Any, **kwargs: Any, ): try: import sqlite3 except ImportError: raise ImportError( "Could not import sqlite3 python package. " "Please install it with `pip install sqlite3`." ) super().__init__(*args, **kwargs) self.conn = sqlite3.connect(db_file) self.session_id = session_id self.table_name = table_name self._create_table_if_not_exists() @property def full_table_name(self) -> str: return f"{self.table_name}_{self.session_id}" def _create_table_if_not_exists(self) -> None: create_table_query = f""" CREATE TABLE IF NOT EXISTS {self.full_table_name} ( key TEXT PRIMARY KEY, value TEXT ) """ with self.conn: self.conn.execute(create_table_query) def get(self, key: str, default: Optional[str] = None) -> Optional[str]: query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer memory. 
Extracts named entities from the recent chat history and generates summaries. With a swappable entity store, persisting entities across conversations. Defaults to an in-memory entity store, and can be swapped out for a Redis, SQLite, or other entity store. """ human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT # Cache of recently detected entity names, if any # It is updated when load_memory_variables is called: entity_cache: List[str] = [] # Number of recent message pairs to consider when updating entities: k: int = 3 chat_history_key: str = "history" # Store to manage entity-related data: entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) @property def buffer(self) -> List[BaseMessage]: """Access chat memory messages.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return ["entities", self.chat_history_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """ Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet. """ # Create an LLMChain for predicting entity names from the recent chat history: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) # Generates a comma-separated list of named entities, # e.g. "Jane, White House, UFO" # or "NONE" if no named entities are extracted: output = chain.predict( history=buffer_string, input=inputs[prompt_input_key], ) # If no named entities are extracted, assigns an empty list. if output.strip() == "NONE": entities = [] else: # Make a list of the extracted entities: entities = [w.strip() for w in output.split(",")] # Make a dictionary of entities with summary if exists: entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, "") # Replaces the entity name cache with the most recently discussed entities, # or if no entities were extracted, clears the cache: self.entity_cache = entities # Should we return as message objects or as a string? if self.return_messages: # Get last `k` pair of chat messages: buffer: Any = self.buffer[-self.k * 2 :] else: # Reuse the string we made earlier: buffer = buffer_string return { self.chat_history_key: buffer, "entities": entity_summaries, } def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """ Save context from this conversation history to the entity store. Generates a summary for each entity in the entity cache by prompting the model, and saves these summaries to the entity store. 
""" super().save_context(inputs, outputs) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) input_data = inputs[prompt_input_key] # Create an LLMChain for predicting entity summarization from the context chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) # Generate new summaries for entities and save them in the entity store for entity in self.entity_cache: # Get existing summary if it exists existing_summary = self.entity_store.get(entity, "") output = chain.predict( summary=existing_summary, entity=entity, history=buffer_string, input=input_data, ) # Save the updated summary to the entity store self.entity_store.set(entity, output.strip()) def clear(self) -> None: """Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
[ "langchain_core.messages.get_buffer_string", "langchain.chains.llm.LLMChain", "langchain.memory.utils.get_prompt_input_key", "langchain_core.pydantic_v1.Field", "langchain_community.utilities.redis.get_client" ]
[((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')]
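A minimal usage sketch for the ConversationEntityMemory entry above. The `llm` name is an assumed stand-in for any BaseLanguageModel implementation (e.g. a chat model), and the import path assumes the class is re-exported from `langchain.memory` as in released LangChain versions.

from langchain.memory import ConversationEntityMemory

# `llm` is assumed to be any BaseLanguageModel instance, e.g. a chat model.
memory = ConversationEntityMemory(llm=llm, k=3)  # k = recent message pairs to scan

# Extract entity names from the pending input and fetch any stored summaries.
variables = memory.load_memory_variables(
    {"input": "Deven and Sam are working on a hackathon project."}
)
print(variables["entities"])  # e.g. {"Deven": "", "Sam": ""} on a first turn

# Persist the turn; this prompts the LLM once per cached entity to refresh its summary.
memory.save_context(
    {"input": "Deven and Sam are working on a hackathon project."},
    {"output": "That sounds like a great project! What are they building?"},
)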
import logging from abc import ABC, abstractmethod from itertools import islice from typing import Any, Dict, Iterable, List, Optional from langchain_community.utilities.redis import get_client from langchain_core.language_models import BaseLanguageModel from langchain_core.messages import BaseMessage, get_buffer_string from langchain_core.prompts import BasePromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.prompt import ( ENTITY_EXTRACTION_PROMPT, ENTITY_SUMMARIZATION_PROMPT, ) from langchain.memory.utils import get_prompt_input_key logger = logging.getLogger(__name__) class BaseEntityStore(BaseModel, ABC): """Abstract base class for Entity store.""" @abstractmethod def get(self, key: str, default: Optional[str] = None) -> Optional[str]: """Get entity value from store.""" pass @abstractmethod def set(self, key: str, value: Optional[str]) -> None: """Set entity value in store.""" pass @abstractmethod def delete(self, key: str) -> None: """Delete entity value from store.""" pass @abstractmethod def exists(self, key: str) -> bool: """Check if entity exists in store.""" pass @abstractmethod def clear(self) -> None: """Delete all entities from store.""" pass class InMemoryEntityStore(BaseEntityStore): """In-memory Entity store.""" store: Dict[str, Optional[str]] = {} def get(self, key: str, default: Optional[str] = None) -> Optional[str]: return self.store.get(key, default) def set(self, key: str, value: Optional[str]) -> None: self.store[key] = value def delete(self, key: str) -> None: del self.store[key] def exists(self, key: str) -> bool: return key in self.store def clear(self) -> None: return self.store.clear() class UpstashRedisEntityStore(BaseEntityStore): """Upstash Redis backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ def __init__( self, session_id: str = "default", url: str = "", token: str = "", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: from upstash_redis import Redis except ImportError: raise ImportError( "Could not import upstash_redis python package. " "Please install it with `pip install upstash_redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = Redis(url=url, token=token) except Exception: logger.error("Upstash Redis instance could not be initiated.") self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"Redis MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: def scan_and_delete(cursor: int) -> int: cursor, keys_to_delete = self.redis_client.scan( cursor, f"{self.full_key_prefix}:*" ) self.redis_client.delete(*keys_to_delete) return cursor cursor = scan_and_delete(0) while cursor != 0: scan_and_delete(cursor) class RedisEntityStore(BaseEntityStore): """Redis-backed Entity store. Entities get a TTL of 1 day by default, and that TTL is extended by 3 days every time the entity is read back. """ redis_client: Any session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 recall_ttl: Optional[int] = 60 * 60 * 24 * 3 def __init__( self, session_id: str = "default", url: str = "redis://localhost:6379/0", key_prefix: str = "memory_store", ttl: Optional[int] = 60 * 60 * 24, recall_ttl: Optional[int] = 60 * 60 * 24 * 3, *args: Any, **kwargs: Any, ): try: import redis except ImportError: raise ImportError( "Could not import redis python package. " "Please install it with `pip install redis`." 
) super().__init__(*args, **kwargs) try: self.redis_client = get_client(redis_url=url, decode_responses=True) except redis.exceptions.ConnectionError as error: logger.error(error) self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl self.recall_ttl = recall_ttl or ttl @property def full_key_prefix(self) -> str: return f"{self.key_prefix}:{self.session_id}" def get(self, key: str, default: Optional[str] = None) -> Optional[str]: res = ( self.redis_client.getex(f"{self.full_key_prefix}:{key}", ex=self.recall_ttl) or default or "" ) logger.debug(f"REDIS MEM get '{self.full_key_prefix}:{key}': '{res}'") return res def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) self.redis_client.set(f"{self.full_key_prefix}:{key}", value, ex=self.ttl) logger.debug( f"REDIS MEM set '{self.full_key_prefix}:{key}': '{value}' EX {self.ttl}" ) def delete(self, key: str) -> None: self.redis_client.delete(f"{self.full_key_prefix}:{key}") def exists(self, key: str) -> bool: return self.redis_client.exists(f"{self.full_key_prefix}:{key}") == 1 def clear(self) -> None: # iterate a list in batches of size batch_size def batched(iterable: Iterable[Any], batch_size: int) -> Iterable[Any]: iterator = iter(iterable) while batch := list(islice(iterator, batch_size)): yield batch for keybatch in batched( self.redis_client.scan_iter(f"{self.full_key_prefix}:*"), 500 ): self.redis_client.delete(*keybatch) class SQLiteEntityStore(BaseEntityStore): """SQLite-backed Entity store""" session_id: str = "default" table_name: str = "memory_store" conn: Any = None class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True def __init__( self, session_id: str = "default", db_file: str = "entities.db", table_name: str = "memory_store", *args: Any, **kwargs: Any, ): try: import sqlite3 except ImportError: raise ImportError( "Could not import sqlite3 python package. " "Please install it with `pip install sqlite3`." ) super().__init__(*args, **kwargs) self.conn = sqlite3.connect(db_file) self.session_id = session_id self.table_name = table_name self._create_table_if_not_exists() @property def full_table_name(self) -> str: return f"{self.table_name}_{self.session_id}" def _create_table_if_not_exists(self) -> None: create_table_query = f""" CREATE TABLE IF NOT EXISTS {self.full_table_name} ( key TEXT PRIMARY KEY, value TEXT ) """ with self.conn: self.conn.execute(create_table_query) def get(self, key: str, default: Optional[str] = None) -> Optional[str]: query = f""" SELECT value FROM {self.full_table_name} WHERE key = ? """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() if result is not None: value = result[0] return value return default def set(self, key: str, value: Optional[str]) -> None: if not value: return self.delete(key) query = f""" INSERT OR REPLACE INTO {self.full_table_name} (key, value) VALUES (?, ?) """ with self.conn: self.conn.execute(query, (key, value)) def delete(self, key: str) -> None: query = f""" DELETE FROM {self.full_table_name} WHERE key = ? """ with self.conn: self.conn.execute(query, (key,)) def exists(self, key: str) -> bool: query = f""" SELECT 1 FROM {self.full_table_name} WHERE key = ? LIMIT 1 """ cursor = self.conn.execute(query, (key,)) result = cursor.fetchone() return result is not None def clear(self) -> None: query = f""" DELETE FROM {self.full_table_name} """ with self.conn: self.conn.execute(query) class ConversationEntityMemory(BaseChatMemory): """Entity extractor & summarizer memory. 
Extracts named entities from the recent chat history and generates summaries. With a swappable entity store, persisting entities across conversations. Defaults to an in-memory entity store, and can be swapped out for a Redis, SQLite, or other entity store. """ human_prefix: str = "Human" ai_prefix: str = "AI" llm: BaseLanguageModel entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT entity_summarization_prompt: BasePromptTemplate = ENTITY_SUMMARIZATION_PROMPT # Cache of recently detected entity names, if any # It is updated when load_memory_variables is called: entity_cache: List[str] = [] # Number of recent message pairs to consider when updating entities: k: int = 3 chat_history_key: str = "history" # Store to manage entity-related data: entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore) @property def buffer(self) -> List[BaseMessage]: """Access chat memory messages.""" return self.chat_memory.messages @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return ["entities", self.chat_history_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """ Returns chat history and all generated entities with summaries if available, and updates or clears the recent entity cache. New entity name can be found when calling this method, before the entity summaries are generated, so the entity cache values may be empty if no entity descriptions are generated yet. """ # Create an LLMChain for predicting entity names from the recent chat history: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) # Generates a comma-separated list of named entities, # e.g. "Jane, White House, UFO" # or "NONE" if no named entities are extracted: output = chain.predict( history=buffer_string, input=inputs[prompt_input_key], ) # If no named entities are extracted, assigns an empty list. if output.strip() == "NONE": entities = [] else: # Make a list of the extracted entities: entities = [w.strip() for w in output.split(",")] # Make a dictionary of entities with summary if exists: entity_summaries = {} for entity in entities: entity_summaries[entity] = self.entity_store.get(entity, "") # Replaces the entity name cache with the most recently discussed entities, # or if no entities were extracted, clears the cache: self.entity_cache = entities # Should we return as message objects or as a string? if self.return_messages: # Get last `k` pair of chat messages: buffer: Any = self.buffer[-self.k * 2 :] else: # Reuse the string we made earlier: buffer = buffer_string return { self.chat_history_key: buffer, "entities": entity_summaries, } def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """ Save context from this conversation history to the entity store. Generates a summary for each entity in the entity cache by prompting the model, and saves these summaries to the entity store. 
""" super().save_context(inputs, outputs) if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key # Extract an arbitrary window of the last message pairs from # the chat history, where the hyperparameter k is the # number of message pairs: buffer_string = get_buffer_string( self.buffer[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) input_data = inputs[prompt_input_key] # Create an LLMChain for predicting entity summarization from the context chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt) # Generate new summaries for entities and save them in the entity store for entity in self.entity_cache: # Get existing summary if it exists existing_summary = self.entity_store.get(entity, "") output = chain.predict( summary=existing_summary, entity=entity, history=buffer_string, input=input_data, ) # Save the updated summary to the entity store self.entity_store.set(entity, output.strip()) def clear(self) -> None: """Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
[ "langchain_core.messages.get_buffer_string", "langchain.chains.llm.LLMChain", "langchain.memory.utils.get_prompt_input_key", "langchain_core.pydantic_v1.Field", "langchain_community.utilities.redis.get_client" ]
[((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((8049, 8073), 'sqlite3.connect', 'sqlite3.connect', (['db_file'], {}), '(db_file)\n', (8064, 8073), False, 'import sqlite3\n'), ((11938, 11998), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_extraction_prompt'}), '(llm=self.llm, prompt=self.entity_extraction_prompt)\n', (11946, 11998), False, 'from langchain.chains.llm import LLMChain\n'), ((12369, 12475), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (12386, 12475), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14600, 14706), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.buffer[-self.k * 2:]'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(self.buffer[-self.k * 2:], human_prefix=self.human_prefix,\n ai_prefix=self.ai_prefix)\n', (14617, 14706), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((14897, 14960), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt': 'self.entity_summarization_prompt'}), '(llm=self.llm, prompt=self.entity_summarization_prompt)\n', (14905, 14960), False, 'from langchain.chains.llm import LLMChain\n'), ((2881, 2908), 'upstash_redis.Redis', 'Redis', ([], {'url': 'url', 'token': 'token'}), '(url=url, token=token)\n', (2886, 2908), False, 'from upstash_redis import Redis\n'), ((5539, 5587), 'langchain_community.utilities.redis.get_client', 'get_client', ([], {'redis_url': 'url', 'decode_responses': '(True)'}), '(redis_url=url, decode_responses=True)\n', (5549, 5587), False, 'from langchain_community.utilities.redis import get_client\n'), ((12066, 12117), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (12086, 12117), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((14297, 14348), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (14317, 14348), False, 'from langchain.memory.utils import get_prompt_input_key\n'), ((7038, 7066), 'itertools.islice', 'islice', (['iterator', 'batch_size'], {}), '(iterator, batch_size)\n', (7044, 7066), False, 'from itertools import islice\n')]
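The same memory class with its entity store swapped out, sketched against the SQLiteEntityStore defined in the module above. `llm` is again an assumed BaseLanguageModel instance, and the import path assumes the module lives at `langchain.memory.entity`.

from langchain.memory.entity import ConversationEntityMemory, SQLiteEntityStore

store = SQLiteEntityStore(session_id="alice", db_file="entities.db")
memory = ConversationEntityMemory(llm=llm, entity_store=store)

# Entity summaries now survive process restarts: rows land in the
# "memory_store_alice" table created by _create_table_if_not_exists().
store.set("Deven", "Deven is working on a hackathon project.")
assert store.exists("Deven")
print(store.get("Deven", ""))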
from typing import Any, Dict, List, Optional from langchain_core.messages import BaseMessage, get_buffer_string from langchain_core.pydantic_v1 import root_validator from langchain.memory.chat_memory import BaseChatMemory, BaseMemory from langchain.memory.utils import get_prompt_input_key class ConversationBufferMemory(BaseChatMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: @property def buffer(self) -> Any: """String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str async def abuffer(self) -> Any: """String buffer of memory.""" return ( await self.abuffer_as_messages() if self.return_messages else await self.abuffer_as_str() ) def _buffer_as_str(self, messages: List[BaseMessage]) -> str: return get_buffer_string( messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def buffer_as_str(self) -> str: """Exposes the buffer as a string in case return_messages is True.""" return self._buffer_as_str(self.chat_memory.messages) async def abuffer_as_str(self) -> str: """Exposes the buffer as a string in case return_messages is True.""" messages = await self.chat_memory.aget_messages() return self._buffer_as_str(messages) @property def buffer_as_messages(self) -> List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is False.""" return self.chat_memory.messages async def abuffer_as_messages(self) -> List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is False.""" return await self.chat_memory.aget_messages() @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer} async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return key-value pairs given the text input to the chain.""" buffer = await self.abuffer() return {self.memory_key: buffer} class ConversationStringBufferMemory(BaseMemory): """Buffer for storing conversation memory.""" human_prefix: str = "Human" ai_prefix: str = "AI" """Prefix to use for AI generated responses.""" buffer: str = "" output_key: Optional[str] = None input_key: Optional[str] = None memory_key: str = "history" #: :meta private: @root_validator() def validate_chains(cls, values: Dict) -> Dict: """Validate that return messages is not True.""" if values.get("return_messages", False): raise ValueError( "return_messages must be False for ConversationStringBufferMemory" ) return values @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. 
:meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return history buffer.""" return {self.memory_key: self.buffer} async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return history buffer.""" return self.load_memory_variables(inputs) def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save context from this conversation to buffer.""" if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") output_key = list(outputs.keys())[0] else: output_key = self.output_key human = f"{self.human_prefix}: " + inputs[prompt_input_key] ai = f"{self.ai_prefix}: " + outputs[output_key] self.buffer += "\n" + "\n".join([human, ai]) async def asave_context( self, inputs: Dict[str, Any], outputs: Dict[str, str] ) -> None: """Save context from this conversation to buffer.""" return self.save_context(inputs, outputs) def clear(self) -> None: """Clear memory contents.""" self.buffer = "" async def aclear(self) -> None: self.clear()
[ "langchain_core.messages.get_buffer_string", "langchain_core.pydantic_v1.root_validator", "langchain.memory.utils.get_prompt_input_key" ]
[((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (1000, 1073), False, 'from langchain_core.messages import BaseMessage, get_buffer_string\n'), ((3946, 3997), 'langchain.memory.utils.get_prompt_input_key', 'get_prompt_input_key', (['inputs', 'self.memory_variables'], {}), '(inputs, self.memory_variables)\n', (3966, 3997), False, 'from langchain.memory.utils import get_prompt_input_key\n')]
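A minimal sketch of the buffer memory above; it needs no LLM, since save_context simply appends prefixed turns and load_memory_variables returns them under the configured memory_key ("history" by default). The import assumes the class is re-exported from `langchain.memory`.

from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "Hi there"}, {"output": "Hello! How can I help?"})
memory.save_context({"input": "Tell me a joke"}, {"output": "Why did the chicken cross the road?"})

print(memory.load_memory_variables({})["history"])
# Human: Hi there
# AI: Hello! How can I help?
# Human: Tell me a joke
# AI: Why did the chicken cross the road?

# With return_messages=True the same turns come back as BaseMessage objects
# instead of a single formatted string.
messages_memory = ConversationBufferMemory(return_messages=True)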
"""**Tools** are classes that an Agent uses to interact with the world. Each tool has a **description**. Agent uses the description to choose the right tool for the job. **Class hierarchy:** .. code-block:: ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool <name> # Examples: BraveSearch, HumanInputRun **Main helpers:** .. code-block:: CallbackManagerForToolRun, AsyncCallbackManagerForToolRun """ import warnings from typing import Any from langchain_core._api import LangChainDeprecationWarning from langchain_core.tools import BaseTool, StructuredTool, Tool, tool from langchain.utils.interactive_env import is_interactive_env # Used for internal purposes _DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"} def _import_python_tool_PythonAstREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def _import_python_tool_PythonREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def __getattr__(name: str) -> Any: if name == "PythonAstREPLTool": return _import_python_tool_PythonAstREPLTool() elif name == "PythonREPLTool": return _import_python_tool_PythonREPLTool() else: from langchain_community import tools # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing tools from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.tools import {name}`.\n\n" "To install langchain-community run " "`pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) return getattr(tools, name) __all__ = [ "AINAppOps", "AINOwnerOps", "AINRuleOps", "AINTransfer", "AINValueOps", "AIPluginTool", "APIOperation", "ArxivQueryRun", "AzureCogsFormRecognizerTool", "AzureCogsImageAnalysisTool", "AzureCogsSpeech2TextTool", "AzureCogsText2SpeechTool", "AzureCogsTextAnalyticsHealthTool", "BaseGraphQLTool", "BaseRequestsTool", "BaseSQLDatabaseTool", "BaseSparkSQLTool", "BaseTool", "BearlyInterpreterTool", "BingSearchResults", "BingSearchRun", "BraveSearch", "ClickTool", "CopyFileTool", "CurrentWebPageTool", "DeleteFileTool", "DuckDuckGoSearchResults", "DuckDuckGoSearchRun", "E2BDataAnalysisTool", "EdenAiExplicitImageTool", "EdenAiObjectDetectionTool", "EdenAiParsingIDTool", "EdenAiParsingInvoiceTool", "EdenAiSpeechToTextTool", "EdenAiTextModerationTool", "EdenAiTextToSpeechTool", "EdenaiTool", "ElevenLabsText2SpeechTool", "ExtractHyperlinksTool", "ExtractTextTool", "FileSearchTool", "GetElementsTool", "GmailCreateDraft", "GmailGetMessage", "GmailGetThread", "GmailSearch", "GmailSendMessage", "GoogleCloudTextToSpeechTool", "GooglePlacesTool", "GoogleSearchResults", "GoogleSearchRun", "GoogleSerperResults", "GoogleSerperRun", "SearchAPIResults", "SearchAPIRun", "HumanInputRun", "IFTTTWebhook", "InfoPowerBITool", "InfoSQLDatabaseTool", "InfoSparkSQLTool", "JiraAction", "JsonGetValueTool", "JsonListKeysTool", "ListDirectoryTool", "ListPowerBITool", "ListSQLDatabaseTool", "ListSparkSQLTool", "MerriamWebsterQueryRun", "MetaphorSearchResults", "MoveFileTool", "NasaAction", "NavigateBackTool", "NavigateTool", "O365CreateDraftMessage", "O365SearchEmails", "O365SearchEvents", "O365SendEvent", "O365SendMessage", "OpenAPISpec", "OpenWeatherMapQueryRun", "PubmedQueryRun", "RedditSearchRun", "QueryCheckerTool", "QueryPowerBITool", "QuerySQLCheckerTool", "QuerySQLDataBaseTool", "QuerySparkSQLTool", "ReadFileTool", "RequestsDeleteTool", "RequestsGetTool", "RequestsPatchTool", "RequestsPostTool", "RequestsPutTool", "SteamWebAPIQueryRun", "SceneXplainTool", "SearxSearchResults", "SearxSearchRun", "ShellTool", "SlackGetChannel", "SlackGetMessage", "SlackScheduleMessage", "SlackSendMessage", "SleepTool", "StdInInquireTool", "StackExchangeTool", "SteamshipImageGenerationTool", "StructuredTool", "Tool", "VectorStoreQATool", "VectorStoreQAWithSourcesTool", "WikipediaQueryRun", "WolframAlphaQueryRun", "WriteFileTool", "YahooFinanceNewsTool", "YouTubeSearchTool", "ZapierNLAListActions", "ZapierNLARunAction", "format_tool_to_openai_function", "tool", ]
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')]
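The following is a hedged usage sketch of the lazy re-export module above, not part of the original source. It assumes `langchain` and `langchain-community` are installed and that `BraveSearch` exists in `langchain_community.tools`; a legacy import through `langchain.tools` is resolved by the module-level `__getattr__` and, outside an interactive environment, emits the `LangChainDeprecationWarning` before returning the community class.

# Hedged usage sketch (assumes `langchain` and `langchain-community` are installed).
# Names not imported at the top of langchain.tools are resolved lazily through
# the module-level __getattr__ defined above.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # BraveSearch is listed in __all__ but only re-exported on first access.
    from langchain.tools import BraveSearch

print(BraveSearch.__module__)             # resolved via getattr() on langchain_community.tools
print([str(w.message)[:80] for w in caught])  # deprecation notice in non-interactive runs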
"""**Tools** are classes that an Agent uses to interact with the world. Each tool has a **description**. Agent uses the description to choose the right tool for the job. **Class hierarchy:** .. code-block:: ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool <name> # Examples: BraveSearch, HumanInputRun **Main helpers:** .. code-block:: CallbackManagerForToolRun, AsyncCallbackManagerForToolRun """ import warnings from typing import Any from langchain_core._api import LangChainDeprecationWarning from langchain_core.tools import BaseTool, StructuredTool, Tool, tool from langchain.utils.interactive_env import is_interactive_env # Used for internal purposes _DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"} def _import_python_tool_PythonAstREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def _import_python_tool_PythonREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def __getattr__(name: str) -> Any: if name == "PythonAstREPLTool": return _import_python_tool_PythonAstREPLTool() elif name == "PythonREPLTool": return _import_python_tool_PythonREPLTool() else: from langchain_community import tools # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing tools from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.tools import {name}`.\n\n" "To install langchain-community run " "`pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) return getattr(tools, name) __all__ = [ "AINAppOps", "AINOwnerOps", "AINRuleOps", "AINTransfer", "AINValueOps", "AIPluginTool", "APIOperation", "ArxivQueryRun", "AzureCogsFormRecognizerTool", "AzureCogsImageAnalysisTool", "AzureCogsSpeech2TextTool", "AzureCogsText2SpeechTool", "AzureCogsTextAnalyticsHealthTool", "BaseGraphQLTool", "BaseRequestsTool", "BaseSQLDatabaseTool", "BaseSparkSQLTool", "BaseTool", "BearlyInterpreterTool", "BingSearchResults", "BingSearchRun", "BraveSearch", "ClickTool", "CopyFileTool", "CurrentWebPageTool", "DeleteFileTool", "DuckDuckGoSearchResults", "DuckDuckGoSearchRun", "E2BDataAnalysisTool", "EdenAiExplicitImageTool", "EdenAiObjectDetectionTool", "EdenAiParsingIDTool", "EdenAiParsingInvoiceTool", "EdenAiSpeechToTextTool", "EdenAiTextModerationTool", "EdenAiTextToSpeechTool", "EdenaiTool", "ElevenLabsText2SpeechTool", "ExtractHyperlinksTool", "ExtractTextTool", "FileSearchTool", "GetElementsTool", "GmailCreateDraft", "GmailGetMessage", "GmailGetThread", "GmailSearch", "GmailSendMessage", "GoogleCloudTextToSpeechTool", "GooglePlacesTool", "GoogleSearchResults", "GoogleSearchRun", "GoogleSerperResults", "GoogleSerperRun", "SearchAPIResults", "SearchAPIRun", "HumanInputRun", "IFTTTWebhook", "InfoPowerBITool", "InfoSQLDatabaseTool", "InfoSparkSQLTool", "JiraAction", "JsonGetValueTool", "JsonListKeysTool", "ListDirectoryTool", "ListPowerBITool", "ListSQLDatabaseTool", "ListSparkSQLTool", "MerriamWebsterQueryRun", "MetaphorSearchResults", "MoveFileTool", "NasaAction", "NavigateBackTool", "NavigateTool", "O365CreateDraftMessage", "O365SearchEmails", "O365SearchEvents", "O365SendEvent", "O365SendMessage", "OpenAPISpec", "OpenWeatherMapQueryRun", "PubmedQueryRun", "RedditSearchRun", "QueryCheckerTool", "QueryPowerBITool", "QuerySQLCheckerTool", "QuerySQLDataBaseTool", "QuerySparkSQLTool", "ReadFileTool", "RequestsDeleteTool", "RequestsGetTool", "RequestsPatchTool", "RequestsPostTool", "RequestsPutTool", "SteamWebAPIQueryRun", "SceneXplainTool", "SearxSearchResults", "SearxSearchRun", "ShellTool", "SlackGetChannel", "SlackGetMessage", "SlackScheduleMessage", "SlackSendMessage", "SleepTool", "StdInInquireTool", "StackExchangeTool", "SteamshipImageGenerationTool", "StructuredTool", "Tool", "VectorStoreQATool", "VectorStoreQAWithSourcesTool", "WikipediaQueryRun", "WolframAlphaQueryRun", "WriteFileTool", "YahooFinanceNewsTool", "YouTubeSearchTool", "ZapierNLAListActions", "ZapierNLARunAction", "format_tool_to_openai_function", "tool", ]
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')]
"""**Tools** are classes that an Agent uses to interact with the world. Each tool has a **description**. Agent uses the description to choose the right tool for the job. **Class hierarchy:** .. code-block:: ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool <name> # Examples: BraveSearch, HumanInputRun **Main helpers:** .. code-block:: CallbackManagerForToolRun, AsyncCallbackManagerForToolRun """ import warnings from typing import Any from langchain_core._api import LangChainDeprecationWarning from langchain_core.tools import BaseTool, StructuredTool, Tool, tool from langchain.utils.interactive_env import is_interactive_env # Used for internal purposes _DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"} def _import_python_tool_PythonAstREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def _import_python_tool_PythonREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def __getattr__(name: str) -> Any: if name == "PythonAstREPLTool": return _import_python_tool_PythonAstREPLTool() elif name == "PythonREPLTool": return _import_python_tool_PythonREPLTool() else: from langchain_community import tools # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing tools from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.tools import {name}`.\n\n" "To install langchain-community run " "`pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) return getattr(tools, name) __all__ = [ "AINAppOps", "AINOwnerOps", "AINRuleOps", "AINTransfer", "AINValueOps", "AIPluginTool", "APIOperation", "ArxivQueryRun", "AzureCogsFormRecognizerTool", "AzureCogsImageAnalysisTool", "AzureCogsSpeech2TextTool", "AzureCogsText2SpeechTool", "AzureCogsTextAnalyticsHealthTool", "BaseGraphQLTool", "BaseRequestsTool", "BaseSQLDatabaseTool", "BaseSparkSQLTool", "BaseTool", "BearlyInterpreterTool", "BingSearchResults", "BingSearchRun", "BraveSearch", "ClickTool", "CopyFileTool", "CurrentWebPageTool", "DeleteFileTool", "DuckDuckGoSearchResults", "DuckDuckGoSearchRun", "E2BDataAnalysisTool", "EdenAiExplicitImageTool", "EdenAiObjectDetectionTool", "EdenAiParsingIDTool", "EdenAiParsingInvoiceTool", "EdenAiSpeechToTextTool", "EdenAiTextModerationTool", "EdenAiTextToSpeechTool", "EdenaiTool", "ElevenLabsText2SpeechTool", "ExtractHyperlinksTool", "ExtractTextTool", "FileSearchTool", "GetElementsTool", "GmailCreateDraft", "GmailGetMessage", "GmailGetThread", "GmailSearch", "GmailSendMessage", "GoogleCloudTextToSpeechTool", "GooglePlacesTool", "GoogleSearchResults", "GoogleSearchRun", "GoogleSerperResults", "GoogleSerperRun", "SearchAPIResults", "SearchAPIRun", "HumanInputRun", "IFTTTWebhook", "InfoPowerBITool", "InfoSQLDatabaseTool", "InfoSparkSQLTool", "JiraAction", "JsonGetValueTool", "JsonListKeysTool", "ListDirectoryTool", "ListPowerBITool", "ListSQLDatabaseTool", "ListSparkSQLTool", "MerriamWebsterQueryRun", "MetaphorSearchResults", "MoveFileTool", "NasaAction", "NavigateBackTool", "NavigateTool", "O365CreateDraftMessage", "O365SearchEmails", "O365SearchEvents", "O365SendEvent", "O365SendMessage", "OpenAPISpec", "OpenWeatherMapQueryRun", "PubmedQueryRun", "RedditSearchRun", "QueryCheckerTool", "QueryPowerBITool", "QuerySQLCheckerTool", "QuerySQLDataBaseTool", "QuerySparkSQLTool", "ReadFileTool", "RequestsDeleteTool", "RequestsGetTool", "RequestsPatchTool", "RequestsPostTool", "RequestsPutTool", "SteamWebAPIQueryRun", "SceneXplainTool", "SearxSearchResults", "SearxSearchRun", "ShellTool", "SlackGetChannel", "SlackGetMessage", "SlackScheduleMessage", "SlackSendMessage", "SleepTool", "StdInInquireTool", "StackExchangeTool", "SteamshipImageGenerationTool", "StructuredTool", "Tool", "VectorStoreQATool", "VectorStoreQAWithSourcesTool", "WikipediaQueryRun", "WolframAlphaQueryRun", "WriteFileTool", "YahooFinanceNewsTool", "YouTubeSearchTool", "ZapierNLAListActions", "ZapierNLARunAction", "format_tool_to_openai_function", "tool", ]
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')]
"""**Tools** are classes that an Agent uses to interact with the world. Each tool has a **description**. Agent uses the description to choose the right tool for the job. **Class hierarchy:** .. code-block:: ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool <name> # Examples: BraveSearch, HumanInputRun **Main helpers:** .. code-block:: CallbackManagerForToolRun, AsyncCallbackManagerForToolRun """ import warnings from typing import Any from langchain_core._api import LangChainDeprecationWarning from langchain_core.tools import BaseTool, StructuredTool, Tool, tool from langchain.utils.interactive_env import is_interactive_env # Used for internal purposes _DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"} def _import_python_tool_PythonAstREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def _import_python_tool_PythonREPLTool() -> Any: raise ImportError( "This tool has been moved to langchain experiment. " "This tool has access to a python REPL. " "For best practices make sure to sandbox this tool. " "Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md " "To keep using this code as is, install langchain experimental and " "update relevant imports replacing 'langchain' with 'langchain_experimental'" ) def __getattr__(name: str) -> Any: if name == "PythonAstREPLTool": return _import_python_tool_PythonAstREPLTool() elif name == "PythonREPLTool": return _import_python_tool_PythonREPLTool() else: from langchain_community import tools # If not in interactive env, raise warning. if not is_interactive_env(): warnings.warn( "Importing tools from langchain is deprecated. Importing from " "langchain will no longer be supported as of langchain==0.2.0. 
" "Please import from langchain-community instead:\n\n" f"`from langchain_community.tools import {name}`.\n\n" "To install langchain-community run " "`pip install -U langchain-community`.", category=LangChainDeprecationWarning, ) return getattr(tools, name) __all__ = [ "AINAppOps", "AINOwnerOps", "AINRuleOps", "AINTransfer", "AINValueOps", "AIPluginTool", "APIOperation", "ArxivQueryRun", "AzureCogsFormRecognizerTool", "AzureCogsImageAnalysisTool", "AzureCogsSpeech2TextTool", "AzureCogsText2SpeechTool", "AzureCogsTextAnalyticsHealthTool", "BaseGraphQLTool", "BaseRequestsTool", "BaseSQLDatabaseTool", "BaseSparkSQLTool", "BaseTool", "BearlyInterpreterTool", "BingSearchResults", "BingSearchRun", "BraveSearch", "ClickTool", "CopyFileTool", "CurrentWebPageTool", "DeleteFileTool", "DuckDuckGoSearchResults", "DuckDuckGoSearchRun", "E2BDataAnalysisTool", "EdenAiExplicitImageTool", "EdenAiObjectDetectionTool", "EdenAiParsingIDTool", "EdenAiParsingInvoiceTool", "EdenAiSpeechToTextTool", "EdenAiTextModerationTool", "EdenAiTextToSpeechTool", "EdenaiTool", "ElevenLabsText2SpeechTool", "ExtractHyperlinksTool", "ExtractTextTool", "FileSearchTool", "GetElementsTool", "GmailCreateDraft", "GmailGetMessage", "GmailGetThread", "GmailSearch", "GmailSendMessage", "GoogleCloudTextToSpeechTool", "GooglePlacesTool", "GoogleSearchResults", "GoogleSearchRun", "GoogleSerperResults", "GoogleSerperRun", "SearchAPIResults", "SearchAPIRun", "HumanInputRun", "IFTTTWebhook", "InfoPowerBITool", "InfoSQLDatabaseTool", "InfoSparkSQLTool", "JiraAction", "JsonGetValueTool", "JsonListKeysTool", "ListDirectoryTool", "ListPowerBITool", "ListSQLDatabaseTool", "ListSparkSQLTool", "MerriamWebsterQueryRun", "MetaphorSearchResults", "MoveFileTool", "NasaAction", "NavigateBackTool", "NavigateTool", "O365CreateDraftMessage", "O365SearchEmails", "O365SearchEvents", "O365SendEvent", "O365SendMessage", "OpenAPISpec", "OpenWeatherMapQueryRun", "PubmedQueryRun", "RedditSearchRun", "QueryCheckerTool", "QueryPowerBITool", "QuerySQLCheckerTool", "QuerySQLDataBaseTool", "QuerySparkSQLTool", "ReadFileTool", "RequestsDeleteTool", "RequestsGetTool", "RequestsPatchTool", "RequestsPostTool", "RequestsPutTool", "SteamWebAPIQueryRun", "SceneXplainTool", "SearxSearchResults", "SearxSearchRun", "ShellTool", "SlackGetChannel", "SlackGetMessage", "SlackScheduleMessage", "SlackSendMessage", "SleepTool", "StdInInquireTool", "StackExchangeTool", "SteamshipImageGenerationTool", "StructuredTool", "Tool", "VectorStoreQATool", "VectorStoreQAWithSourcesTool", "WikipediaQueryRun", "WolframAlphaQueryRun", "WriteFileTool", "YahooFinanceNewsTool", "YouTubeSearchTool", "ZapierNLAListActions", "ZapierNLARunAction", "format_tool_to_openai_function", "tool", ]
[ "langchain.utils.interactive_env.is_interactive_env" ]
[((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""'], {'category': 'LangChainDeprecationWarning'}), '(\n f"""Importing tools from langchain is deprecated. Importing from langchain will no longer be supported as of langchain==0.2.0. Please import from langchain-community instead:\n\n`from langchain_community.tools import {name}`.\n\nTo install langchain-community run `pip install -U langchain-community`."""\n , category=LangChainDeprecationWarning)\n', (2198, 2548), False, 'import warnings\n')]
from functools import partial
from typing import Optional

from langchain_core.callbacks.manager import (
    Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever

from langchain.tools import Tool


class RetrieverInput(BaseModel):
    """Input to the retriever."""

    query: str = Field(description="query to look up in retriever")


def _get_relevant_documents(
    query: str,
    retriever: BaseRetriever,
    document_prompt: BasePromptTemplate,
    document_separator: str,
    callbacks: Callbacks = None,
) -> str:
    docs = retriever.get_relevant_documents(query, callbacks=callbacks)
    return document_separator.join(
        format_document(doc, document_prompt) for doc in docs
    )


async def _aget_relevant_documents(
    query: str,
    retriever: BaseRetriever,
    document_prompt: BasePromptTemplate,
    document_separator: str,
    callbacks: Callbacks = None,
) -> str:
    docs = await retriever.aget_relevant_documents(query, callbacks=callbacks)
    return document_separator.join(
        format_document(doc, document_prompt) for doc in docs
    )


def create_retriever_tool(
    retriever: BaseRetriever,
    name: str,
    description: str,
    *,
    document_prompt: Optional[BasePromptTemplate] = None,
    document_separator: str = "\n\n",
) -> Tool:
    """Create a tool to do retrieval of documents.

    Args:
        retriever: The retriever to use for the retrieval
        name: The name for the tool. This will be passed to the language model,
            so should be unique and somewhat descriptive.
        description: The description for the tool. This will be passed to the
            language model, so should be descriptive.

    Returns:
        Tool class to pass to an agent
    """
    document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
    func = partial(
        _get_relevant_documents,
        retriever=retriever,
        document_prompt=document_prompt,
        document_separator=document_separator,
    )
    afunc = partial(
        _aget_relevant_documents,
        retriever=retriever,
        document_prompt=document_prompt,
        document_separator=document_separator,
    )
    return Tool(
        name=name,
        description=description,
        func=func,
        coroutine=afunc,
        args_schema=RetrieverInput,
    )
[ "langchain.tools.Tool", "langchain_core.prompts.format_document", "langchain_core.prompts.PromptTemplate.from_template", "langchain_core.pydantic_v1.Field" ]
[((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_get_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2003, 2126), False, 'from functools import partial\n'), ((2173, 2304), 'functools.partial', 'partial', (['_aget_relevant_documents'], {'retriever': 'retriever', 'document_prompt': 'document_prompt', 'document_separator': 'document_separator'}), '(_aget_relevant_documents, retriever=retriever, document_prompt=\n document_prompt, document_separator=document_separator)\n', (2180, 2304), False, 'from functools import partial\n'), ((2350, 2450), 'langchain.tools.Tool', 'Tool', ([], {'name': 'name', 'description': 'description', 'func': 'func', 'coroutine': 'afunc', 'args_schema': 'RetrieverInput'}), '(name=name, description=description, func=func, coroutine=afunc,\n args_schema=RetrieverInput)\n', (2354, 2450), False, 'from langchain.tools import Tool\n'), ((1938, 1984), 'langchain_core.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""{page_content}"""'], {}), "('{page_content}')\n", (1966, 1984), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((796, 833), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (811, 833), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n'), ((1176, 1213), 'langchain_core.prompts.format_document', 'format_document', (['doc', 'document_prompt'], {}), '(doc, document_prompt)\n', (1191, 1213), False, 'from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document\n')]
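Below is a hedged usage sketch of `create_retriever_tool`, not part of the original source. It assumes `langchain` and `langchain-core` are installed and that the module above is importable as `langchain.tools.retriever`; `TinyRetriever` and its canned documents are hypothetical stand-ins for a real retriever backed by a vector store.

# Hedged usage sketch (TinyRetriever and its documents are made up for illustration).
from typing import List

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever

from langchain.tools.retriever import create_retriever_tool


class TinyRetriever(BaseRetriever):
    """Toy retriever that returns the same canned documents for any query."""

    docs: List[Document] = [
        Document(page_content="LanceDB is an embedded vector database."),
        Document(page_content="Retrievers return documents relevant to a query."),
    ]

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        return self.docs


retriever_tool = create_retriever_tool(
    TinyRetriever(),
    name="tiny_search",
    description="Searches a small in-memory document set.",
)

# Each document is rendered with the default "{page_content}" prompt and the
# results are joined with the default "\n\n" separator.
print(retriever_tool.run("vector database"))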