Update app.py
app.py CHANGED
@@ -11,6 +11,9 @@ from llama_index.text_splitter import TokenTextSplitter
 from llama_index.node_parser import SimpleNodeParser
 from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
 from llama_index import SimpleDirectoryReader, VectorStoreIndex
+from g4f import Provider, models
+from langchain.llms.base import LLM
+from llama_index.llms import LangChainLLM
 from gradio import Interface
 nest_asyncio.apply()
 from huggingface_hub import hf_hub_download
@@ -38,10 +41,11 @@ prompt_helper = PromptHelper(
 from langchain_g4f import G4FLLM
 
 async def main(question):
-    llm = 
-    
+    llm : LLM = G4FLLM(
+        model=models.gpt_35_turbo,
+        provider=Provider.Acytoo,
     )
-    
+    
 
     llm = LangChainLLM(llm=llm)
 
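
For context, the commit swaps the app's LLM for a free g4f-backed GPT-3.5 endpoint, exposed through LangChain and then wrapped for llama_index. A minimal sketch of how these pieces plausibly fit together inside main follows; the "data" document directory, the embedding model name, and the LangchainEmbedding/ServiceContext wiring are illustrative assumptions and are not part of this diff (the real app also builds a PromptHelper, visible in the second hunk header, which is omitted here).

import nest_asyncio
from g4f import Provider, models
from langchain.llms.base import LLM
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_g4f import G4FLLM
from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext
from llama_index.embeddings import LangchainEmbedding
from llama_index.llms import LangChainLLM

nest_asyncio.apply()

async def main(question):
    # g4f provides a free GPT-3.5-compatible backend behind the LangChain LLM interface.
    llm: LLM = G4FLLM(
        model=models.gpt_35_turbo,
        provider=Provider.Acytoo,
    )

    # Wrap the LangChain LLM so llama_index can drive it.
    llm = LangChainLLM(llm=llm)

    # Assumed wiring (not in the diff): local HuggingFace embeddings plus a
    # vector index built over a "data" folder.
    embed_model = LangchainEmbedding(
        HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    )
    service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)

    documents = SimpleDirectoryReader("data").load_data()
    index = VectorStoreIndex.from_documents(documents, service_context=service_context)

    response = index.as_query_engine().query(question)
    return str(response)

In the app, main is presumably exposed through the gradio Interface that is already imported at the top of the file.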