Update app.py
app.py
CHANGED
@@ -18,7 +18,8 @@ from g4f import Provider, models
 from langchain.llms.base import LLM
 from llama_index.llms import LangChainLLM
 from langchain_g4f import G4FLLM
-
+#from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.core import Settings
 nest_asyncio.apply()
 g4f.debug.logging = True # Enable logging
 #g4f.check_version = False # Disable automatic version checking
@@ -39,7 +40,7 @@ embed_model = HuggingFaceInstructEmbeddings(
 model_name = "hkunlp/instructor-xl"
 model_kwargs = {'device': 'cpu'}
 encode_kwargs = {'normalize_embeddings': True}
-embed_model = HuggingFaceInstructEmbeddings(
+Settings.embed_model = HuggingFaceInstructEmbeddings(
     model_name=model_name,
     model_kwargs=model_kwargs,
     encode_kwargs=encode_kwargs
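For context, here is a minimal sketch of how the updated configuration fits together, assuming llama-index 0.10+ (where Settings lives in llama_index.core) and that HuggingFaceInstructEmbeddings comes from langchain_community.embeddings; the import paths and the index-building lines at the end are illustrative assumptions, not part of this commit.

# Sketch only: import paths and the SimpleDirectoryReader/VectorStoreIndex
# usage are assumptions, not taken from this diff.
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex

model_name = "hkunlp/instructor-xl"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}

# Register the embedding model globally; llama-index wraps the LangChain
# embeddings object (assumed to require the llama-index-embeddings-langchain extra).
Settings.embed_model = HuggingFaceInstructEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)

# Any index built after this point uses the globally configured embed model.
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)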