Update app.py
app.py
CHANGED
@@ -52,10 +52,9 @@ encode_kwargs = {'normalize_embeddings': True}
 
 from llama_index.core import Settings
 
-
-
-
-
+embed_model = InstructorEmbedding(model_name=model_name)
+Settings.embed_model = embed_model
+Settings.chunk_size = 512
 llm= LLM = G4FLLM(
     model=models.gpt_35_turbo_16k,
 )
@@ -63,13 +62,13 @@ llm= LLM = G4FLLM(
 Settings.llm = LangChainLLM(llm=llm)
 #embed_model=embed_model)
 
-Settings.service_context = ServiceContext.from_defaults(chunk_size=5512, llm=llm, embed_model=embed_model )
+#Settings.service_context = ServiceContext.from_defaults(chunk_size=5512, llm=llm, embed_model=embed_model )
 
 
 # rebuild storage context
 storage_context = StorageContext.from_defaults(persist_dir="./storage")
 # load index
-index = load_index_from_storage(storage_context
+index = load_index_from_storage(storage_context) #service_context =service_context)
 
 """
 query_engine = index.as_query_engine()
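For context, this change follows the llama-index 0.10+ migration away from the deprecated ServiceContext toward the global Settings singleton (it also closes the missing parenthesis in the old load_index_from_storage call). A minimal sketch of the resulting configuration, assuming model_name, the InstructorEmbedding wrapper, and the G4F LLM are built elsewhere in app.py as the diff suggests:

```python
# Sketch of the post-commit setup (llama-index >= 0.10), where the
# global Settings object replaces ServiceContext. `embed_model` and
# `llm` are assumed to be constructed elsewhere in app.py.
from llama_index.core import Settings, StorageContext, load_index_from_storage

Settings.embed_model = embed_model  # e.g. InstructorEmbedding(model_name=model_name)
Settings.llm = llm                  # e.g. LangChainLLM(llm=G4FLLM(...))
Settings.chunk_size = 512           # replaces ServiceContext(chunk_size=...)

# load_index_from_storage() now reads the LLM, embedder, and chunk size
# from Settings, so no service_context argument is needed.
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
```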