Update app.py
app.py
CHANGED
@@ -7,13 +7,12 @@ import g4f
 logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
 
-from llama_index.tools import QueryEngineTool, ToolMetadata
-from llama_index.query_engine import SubQuestionQueryEngine
 
-from llama_index
-
+from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
+
 #from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
 #from langchain_community.embeddings import HuggingFaceInstructEmbeddings
+from llama_index.embeddings.huggingface import HuggingFaceInstructEmbeddings
 from g4f import Provider, models
 
 from langchain.llms.base import LLM
@@ -35,7 +34,7 @@ embed_model = HuggingFaceInstructEmbeddings(
     encode_kwargs=encode_kwargs
 )
 """
-from langchain_community.embeddings import HuggingFaceInstructEmbeddings
+#from langchain_community.embeddings import HuggingFaceInstructEmbeddings
 
 model_name = "hkunlp/instructor-xl"
 model_kwargs = {'device': 'cpu'}