"""Gradio question-answering app over local documents.

Builds a llama_index vector index from every file under ``./data``
(embedded with the hkunlp/instructor-large HuggingFace model), answers
queries through a free g4f-backed GPT-3.5-turbo LLM, and serves a simple
text-in / text-out Gradio interface.
"""
import asyncio
import logging
import sys

import gradio as gr
import nest_asyncio

# Log to stdout so messages are visible in notebook / hosted consoles.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM, LangChainLLM
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms.base import LLM
from g4f import Provider, models
from langchain_g4f import G4FLLM

# Permit re-entrant event loops — required when an outer loop is already
# running (e.g. Jupyter) and g4f/llama_index need to run async work.
nest_asyncio.apply()

# Load every document under ./data for indexing.
documents = SimpleDirectoryReader("data").load_data()

embed_model = HuggingFaceEmbeddings(model_name="hkunlp/instructor-large")

# BUG FIX: the original `llm = LLM = G4FLLM(...)` chained assignment also
# rebound the imported `LLM` base class to the instance, shadowing the class
# name for the rest of the module. Bind only `llm`.
g4f_llm = G4FLLM(
    model=models.gpt_35_turbo,
    provider=Provider.Acytoo,
)
llm = LangChainLLM(llm=g4f_llm)

service_context = ServiceContext.from_defaults(
    chunk_size=512,
    llm=llm,
    embed_model=embed_model,
)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)


async def main(query):
    """Answer *query* against the document index and return the response.

    Kept ``async`` for interface compatibility (Gradio accepts coroutine
    functions); the underlying query call itself is synchronous.
    """
    query_engine = index.as_query_engine()
    response = query_engine.query(query)
    print(response)
    return response


iface = gr.Interface(fn=main, inputs="text", outputs="text")

# Guarded launch: building the index on import is preserved above, but the
# blocking web server only starts when run as a script.
if __name__ == "__main__":
    iface.launch()