Update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ from llama_index.query_engine import SubQuestionQueryEngine
 
 from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
 from llama_index.llms import HuggingFaceLLM
-from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
+#from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
 #from langchain_community.embeddings import HuggingFaceInstructEmbeddings
 from g4f import Provider, models
 
@@ -26,7 +26,7 @@ g4f.debug.logging = True # Enable logging
 #print(g4f.version) # Check version
 #print(g4f.Provider.Ails.params)
 
-
+"""
 #documents = SimpleDirectoryReader('data').load_data()
 model_kwargs = {'device': 'cpu'}
 encode_kwargs = {'normalize_embeddings': True}
@@ -34,6 +34,17 @@ embed_model = HuggingFaceInstructEmbeddings(
     model_name="hkunlp/instructor-xl", model_kwargs=model_kwargs,
     encode_kwargs=encode_kwargs
 )
+"""
+from langchain_community.embeddings import HuggingFaceInstructEmbeddings
+
+model_name = "hkunlp/instructor-xl"
+model_kwargs = {'device': 'cpu'}
+encode_kwargs = {'normalize_embeddings': True}
+embed_model = HuggingFaceInstructEmbeddings(
+    model_name=model_name,
+    model_kwargs=model_kwargs,
+    encode_kwargs=encode_kwargs
+)
 
 llm= LLM = G4FLLM(
     model=models.gpt_35_turbo_16k,
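In short, the commit moves the HuggingFaceInstructEmbeddings import from the deprecated langchain.embeddings module to langchain_community.embeddings, disables the old inline setup with a triple-quoted block, and rebuilds embed_model from explicit keyword arguments, while the gpt4free-backed G4FLLM construction below it is unchanged. The sketch that follows shows how these two pieces are typically wired into a llama_index ServiceContext; the langchain_g4f import and the index-building lines are assumptions, since they fall outside the hunks shown above, and only the embed_model/llm construction is taken from the diff itself.

# Minimal sketch, not the full app.py: the G4FLLM import and the index-building
# steps are assumptions; the embed_model and llm setup mirrors the new revision.
from g4f import models
from langchain_g4f import G4FLLM  # assumed source of G4FLLM; not shown in the diff
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext

# Embedding model as configured after this commit
embed_model = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-xl",
    model_kwargs={"device": "cpu"},
    encode_kwargs={"normalize_embeddings": True},
)

# gpt4free-backed LLM, as in the diff (the remaining constructor arguments
# are cut off at the end of the last hunk)
llm = G4FLLM(model=models.gpt_35_turbo_16k)

# Hypothetical wiring: pre-0.10 llama_index can accept a LangChain LLM and a
# LangChain embeddings object here and wrap them internally; older releases may
# need the LangChainLLM / LangchainEmbedding wrappers instead.
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)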