import logging
import sys
import gradio as gr
import asyncio
import nest_asyncio
import g4f
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
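# Mirror llama_index/g4f log output on stdout so retrieval and LLM calls are visible.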
from llama_index.core import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
    set_global_service_context,
)
#from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceInstructEmbeddings
#from langchain_community.embeddings import HuggingFaceInstructEmbeddings
#from llama_index.embeddings.huggingface import HuggingFaceInstructEmbeddings
from g4f import Provider, models
from langchain.llms.base import LLM
from llama_index.llms.langchain import LangChainLLM
from langchain_g4f import G4FLLM
#from llama_index.embeddings.huggingface import HuggingFaceEmbedding
# HuggingFaceInstructEmbeddings lives in langchain_community, not llama_index.
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from llama_index.core import Settings
nest_asyncio.apply()
g4f.debug.logging = True # Enable logging
#g4f.check_version = False # Disable automatic version checking
#print(g4f.version) # Check version
#print(g4f.Provider.Ails.params)
"""
#documents = SimpleDirectoryReader('data').load_data()
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
embed_model = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
model_name = "hkunlp/instructor-xl"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
# Keep a local reference: it is reused by ServiceContext.from_defaults() below.
embed_model = HuggingFaceInstructEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)
Settings.embed_model = embed_model
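# Assumption: assigning a LangChain embedding to Settings.embed_model relies on
# llama_index wrapping it internally, which requires the
# llama-index-embeddings-langchain integration to be installed.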
# g4f exposed as a LangChain LLM, then wrapped so llama_index can drive it.
llm: LLM = G4FLLM(
    model=models.gpt_35_turbo_16k,
)
llm = LangChainLLM(llm=llm)
# ServiceContext is the legacy configuration object (superseded by Settings in
# llama-index 0.10+); kept here because load_index_from_storage below accepts it.
service_context = ServiceContext.from_defaults(chunk_size=5512, llm=llm, embed_model=embed_model)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir="./storage")
# load index
index = load_index_from_storage(storage_context, service_context=service_context)
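# NOTE: load_index_from_storage assumes an index was already built and persisted
# to ./storage. A minimal first-run sketch (assuming the source documents live in
# a ./data directory, as in the commented SimpleDirectoryReader call above):
#
#   documents = SimpleDirectoryReader("data").load_data()
#   index = VectorStoreIndex.from_documents(documents, service_context=service_context)
#   index.storage_context.persist(persist_dir="./storage")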
"""
query_engine = index.as_query_engine()
query_engine_tools = [
QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(name='legal_code_gabon', description='Data on the legal codes of Gabon')
)
]
query_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
"""
async def main(query):
    query_engine = index.as_query_engine()
    response = query_engine.query(query)
    print(response)
    # Return a plain string so Gradio's text output renders the answer.
    return str(response)
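# Gradio accepts async handlers directly; the query itself still runs synchronously.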
iface = gr.Interface(fn=main, inputs=gr.Textbox(label="Question:", lines=4), outputs="text")
iface.launch()
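# Optional: iface.launch(share=True) also serves a temporary public URL.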