Update app.py
app.py CHANGED
@@ -49,7 +49,10 @@ embed_model = HuggingFaceInstructEmbeddings(
 model_name = "hkunlp/instructor-xl"
 model_kwargs = {'device': 'cpu'}
 encode_kwargs = {'normalize_embeddings': True}
-
+
+from llama_index.core import Settings
+
+Settings.embed_model = InstructorEmbedding(
 model_name=model_name
 )
 
@@ -57,7 +60,7 @@ llm= LLM = G4FLLM(
 model=models.gpt_35_turbo_16k,
 )
 
-llm = LangChainLLM(llm=llm)
+Settings.llm = LangChainLLM(llm=llm)
 #embed_model=embed_model)
 
 service_context = ServiceContext.from_defaults(chunk_size=5512, llm=llm, embed_model=embed_model )
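For context: the change above moves the embedding model and the LLM into LlamaIndex's global Settings object, the llama-index 0.10+ replacement for the ServiceContext that the final line still constructs. A minimal sketch of the fully migrated configuration is shown below; it assumes the llama-index-embeddings-instructor, llama-index-llms-langchain, langchain_g4f and g4f packages, and the import paths are assumptions based on those packages rather than on the rest of this repo's app.py.

# Sketch only (not the repo's code): configuring llama-index >= 0.10 via global Settings.
# The import paths and the g4f/langchain_g4f usage are assumptions, mirroring the diff.
from g4f import models
from langchain_g4f import G4FLLM

from llama_index.core import Settings
from llama_index.embeddings.instructor import InstructorEmbedding
from llama_index.llms.langchain import LangChainLLM

model_name = "hkunlp/instructor-xl"

# Embeddings: InstructorEmbedding loads the same instructor-xl model that the
# removed HuggingFaceInstructEmbeddings call pointed at.
Settings.embed_model = InstructorEmbedding(model_name=model_name)

# LLM: g4f-backed LangChain LLM, wrapped so LlamaIndex can call it.
llm = G4FLLM(model=models.gpt_35_turbo_16k)
Settings.llm = LangChainLLM(llm=llm)

# The chunking previously passed to ServiceContext.from_defaults(chunk_size=5512, ...)
# can live on the global settings instead.
Settings.chunk_size = 5512

Once Settings holds the models, index construction no longer needs a ServiceContext at all: a call such as VectorStoreIndex.from_documents(documents) (documents being a hypothetical placeholder for whatever loader output app.py already has) picks up Settings.embed_model and Settings.llm automatically, so the deprecated service_context line kept by this diff becomes redundant.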