import logging
import sys
import gradio as gr
import g4f
# Log to stdout at INFO level (basicConfig already attaches a stdout handler;
# a second StreamHandler would duplicate every log line).
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
from llama_index.core import (
    ServiceContext,
    Settings,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms.base import LLM
from langchain_g4f import G4FLLM
from g4f import models
from llama_index.llms.langchain import LangChainLLM
g4f.debug.logging = True  # Enable g4f debug logging
# Instructor embeddings, computed on CPU with normalized output vectors.
model_kwargs = {"device": "cpu"}
encode_kwargs = {"normalize_embeddings": True}
embed_model = HuggingFaceInstructEmbeddings(
    model_name="hkunlp/instructor-xl",
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)
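# Optional sanity probe (hypothetical, not part of the app flow): embed one
# short string to confirm the model downloads and loads correctly.
#
#   vec = embed_model.embed_query("hello")
#   print(len(vec))  # embedding dimensionality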
# Register the embedder as the global llama_index embedding model.
Settings.embed_model = embed_model
# g4f-backed LLM, wrapped for llama_index via LangChain. The original chained
# assignment (llm = LLM = ...) rebound the imported LLM class; an annotation
# is what was intended.
llm: LLM = G4FLLM(
    model=models.gpt_35_turbo_16k,
)
Settings.llm = LangChainLLM(llm=llm)
# ServiceContext mirrors the global Settings for the explicit loading call below.
service_context = ServiceContext.from_defaults(chunk_size=8512, llm=llm, embed_model=embed_model)
# Rebuild the storage context and load the persisted index from ./storage.
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context, service_context=service_context)
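# If ./storage has not been persisted yet, the index must be built once first.
# A minimal sketch, assuming the source documents live in ./data:
#
#   documents = SimpleDirectoryReader("data").load_data()
#   index = VectorStoreIndex.from_documents(documents, service_context=service_context)
#   index.storage_context.persist(persist_dir="./storage")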
"""
query_engine = index.as_query_engine()
query_engine_tools = [
QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(name='legal_code_gabon', description='Data on the legal codes of Gabon')
)
]
query_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
"""
def main(query):
    # Query the persisted index and return the model's answer.
    query_engine = index.as_query_engine()
    response = query_engine.query(query)
    print(response)
    return response
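# Direct smoke test (hypothetical example query; bypasses the Gradio UI):
#
#   print(main("What do Gabon's legal codes say about employment contracts?"))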
iface = gr.Interface(fn=main, inputs=gr.Textbox(label="Question:", lines=4), outputs="text")
iface.launch()