Update app.py
app.py CHANGED
@@ -7,13 +7,14 @@ from llama_index import ServiceContext, LLMPredictor, PromptHelper
 from llama_index.text_splitter import TokenTextSplitter
 from llama_index.node_parser import SimpleNodeParser
 from langchain.embeddings import HuggingFaceEmbeddings
-from llama_index import SimpleDirectoryReader,
+from llama_index import SimpleDirectoryReader, VectorStoreIndex
 from gradio import Interface
 nest_asyncio.apply()
 
 embed_model = HuggingFaceEmbeddings(
     model_name="sentence-transformers/all-mpnet-base-v2"
 )
+"""
 node_parser = SimpleNodeParser.from_defaults(text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20))
 prompt_helper = PromptHelper(
     context_window=4096,
@@ -21,7 +22,7 @@ prompt_helper = PromptHelper(
     chunk_overlap_ratio=0.1,
     chunk_size_limit=None
 )
-
+"""
 from langchain_g4f import G4FLLM
 
 async def main(question):
@@ -34,12 +35,10 @@ async def main(question):
     llm = LangChainLLM(llm=llm)
 
     service_context = ServiceContext.from_defaults(llm=llm,
-                                                   embed_model=embed_model
-                                                   node_parser=node_parser,
-                                                   prompt_helper=prompt_helper)
+                                                   embed_model=embed_model)
 
     documents = SimpleDirectoryReader("data/").load_data()
-    index =
+    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
     query_engine = index.as_query_engine(service_context=service_context)
     response = query_engine.query(question)
     print(response)
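
The two added """ lines fence the node_parser / prompt_helper setup inside a bare module-level string, which Python evaluates and discards, so the block is effectively commented out. Dropping those objects also sidesteps a syntax error in the old ServiceContext call (no comma after embed_model=embed_model). If the chunking configuration is wanted back later, a minimal sketch of re-enabling it, using only the legacy llama_index names already imported in app.py:

# Sketch: restore the disabled chunking configuration (not part of this commit).
node_parser = SimpleNodeParser.from_defaults(
    text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
)
prompt_helper = PromptHelper(
    context_window=4096,
    # one more argument sits on the file's line 20, between the hunks above, and is not shown
    chunk_overlap_ratio=0.1,
    chunk_size_limit=None,
)
service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
    node_parser=node_parser,      # with the commas the old call was missing
    prompt_helper=prompt_helper,
)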
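
The hunks stop at print(response) and never show how the imported gradio Interface is used. A hypothetical entry point is sketched below; it assumes main() is changed to return str(response) instead of only printing it, and the names ask and demo are illustrative, not from the commit:

# Hypothetical wiring of the Gradio UI; not shown in this commit's hunks.
import asyncio

def ask(question: str) -> str:
    # Assumes main() ends with `return str(response)`.
    # nest_asyncio.apply() at the top of app.py keeps the nested event loop safe.
    return asyncio.run(main(question))

demo = Interface(fn=ask, inputs="text", outputs="text")
demo.launch()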