"""Gradio question-answering app over local documents.

Builds a llama_index vector index from files in ``data/`` using a free
GPT-3.5-turbo backend (via g4f/langchain_g4f) for generation and a local
HuggingFace sentence-transformers model for embeddings, then serves a
simple text-in / text-out Gradio interface.
"""

import asyncio  # kept for parity with the original module imports
import nest_asyncio

import gradio as gr
from gradio import Interface

from g4f import Provider, models
from langchain.llms.base import LLM
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_g4f import G4FLLM

from llama_index import (
    GPTVectorStoreIndex,  # FIX: was used below but never imported (NameError)
    PromptHelper,
    ServiceContext,
    SimpleDirectoryReader,
)
from llama_index.llms import LangChainLLM
from llama_index.node_parser import SimpleNodeParser
from llama_index.text_splitter import TokenTextSplitter

# Allow nested event loops (Gradio + async handlers inside an already-running loop).
nest_asyncio.apply()

# Local embedding model — no API key required.
embed_model = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2"
)

# Split documents into ~1024-token chunks with a small overlap for retrieval.
node_parser = SimpleNodeParser.from_defaults(
    text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
)

# Prompt sizing for a 4k-context model with up to 256 output tokens.
prompt_helper = PromptHelper(
    context_window=4096,
    num_output=256,
    chunk_overlap_ratio=0.1,
    chunk_size_limit=None,
)


async def main(question):
    """Answer *question* against the documents in ``data/``.

    Args:
        question: The user's natural-language query (from the Gradio textbox).

    Returns:
        The query engine's answer as a plain string.

    Note:
        The index is rebuilt on every call; for a static corpus consider
        building it once at module load and reusing it.
    """
    llm: LLM = G4FLLM(
        model=models.gpt_35_turbo,
        provider=Provider.DeepAi,
    )
    llm = LangChainLLM(llm=llm)

    service_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=node_parser,
        prompt_helper=prompt_helper,
    )

    documents = SimpleDirectoryReader("data/").load_data()
    index = GPTVectorStoreIndex.from_documents(
        documents, service_context=service_context
    )
    query_engine = index.as_query_engine(service_context=service_context)
    response = query_engine.query(question)
    # FIX: return a string for the "text" output component instead of the
    # raw llama_index Response object.
    return str(response)


iface = Interface(fn=main, inputs="text", outputs="text")

if __name__ == "__main__":  # guard so importing this module doesn't start a server
    iface.launch()