Docfile committed on
Commit 314250b · verified · 1 Parent(s): e3cca40

Update app.py

Files changed (1)
  1. app.py +11 -5
app.py CHANGED
@@ -8,17 +8,24 @@ logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
 
 
-from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
-
+from llama_index.core import (
+    ServiceContext,
+    SimpleDirectoryReader,
+    StorageContext,
+    VectorStoreIndex,
+    load_index_from_storage,
+    set_global_service_context,
+)
 #from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
-from langchain_community.embeddings import HuggingFaceInstructEmbeddings
+#from langchain_community.embeddings import HuggingFaceInstructEmbeddings
 #from llama_index.embeddings.huggingface import HuggingFaceInstructEmbeddings
 from g4f import Provider, models
 
 from langchain.llms.base import LLM
-from llama_index.llms import LangChainLLM
+from llama_index.llms.langchain import LangChainLLM
 from langchain_g4f import G4FLLM
 #from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.embeddings.huggingface import HuggingFaceInstructEmbeddings
 from llama_index.core import Settings
 nest_asyncio.apply()
 g4f.debug.logging = True # Enable logging
@@ -55,7 +62,6 @@ llm = LangChainLLM(llm=llm)
 
 service_context = ServiceContext.from_defaults(chunk_size=5512, llm=llm, embed_model=embed_model )
 
-from llama_index import StorageContext, load_index_from_storage
 
 # rebuild storage context
 storage_context = StorageContext.from_defaults(persist_dir="./storage")
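The edit migrates app.py from the legacy flat llama_index namespace to the llama_index.core layout introduced in llama-index 0.10, pulls LangChainLLM from the split llama_index.llms.langchain integration, and consolidates the StorageContext/load_index_from_storage imports into the same llama_index.core import block. As context, here is a minimal sketch of how the migrated pieces could fit together; the G4F-backed LLM wrapper, the ServiceContext call, g4f debug logging, and the ./storage persist directory come from the diff, while the embedding class (plain HuggingFaceEmbedding rather than the instructor variant imported above), the model names, and the sample query are illustrative assumptions.

# Sketch of the post-migration wiring, assuming llama-index >= 0.10 with the
# llama-index-llms-langchain and llama-index-embeddings-huggingface packages
# installed. Names not present in the diff are assumptions, not the app's code.
import g4f
import nest_asyncio
from g4f import models
from langchain_g4f import G4FLLM
from llama_index.core import (
    ServiceContext,
    StorageContext,
    load_index_from_storage,
    set_global_service_context,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding  # stand-in for the instructor import
from llama_index.llms.langchain import LangChainLLM

nest_asyncio.apply()
g4f.debug.logging = True  # enable g4f request logging, as in the diff

# Wrap the g4f-backed LangChain LLM so llama-index can drive it.
llm = LangChainLLM(llm=G4FLLM(model=models.gpt_35_turbo))

# Hypothetical embedding model; the committed code imports an instructor variant instead.
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

service_context = ServiceContext.from_defaults(
    chunk_size=5512,  # chunk size as it appears in the diff's context line
    llm=llm,
    embed_model=embed_model,
)
set_global_service_context(service_context)

# Rebuild the index from the persisted ./storage directory, as in the diff.
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)

query_engine = index.as_query_engine()
print(query_engine.query("What does the indexed document cover?"))  # illustrative query

Under the 0.10 package layout these integrations ship as separate pip packages, so requirements.txt would presumably need llama-index-llms-langchain and llama-index-embeddings-huggingface alongside llama-index itself.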