import logging
import sys
import gradio as gr
import asyncio
import nest_asyncio
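# Route library logging to stdout so it shows up in the console / Space logs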
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
ServiceContext,
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
set_global_service_context,
)
# HuggingFaceInstructEmbeddings now lives in langchain_community
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from g4f import Provider, models
from langchain.llms.base import LLM
from llama_index.llms.langchain import LangChainLLM
from langchain_g4f import G4FLLM
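# Patch asyncio to allow nested event loops; g4f issues async calls from
# inside Gradio's already-running loop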
nest_asyncio.apply()
"""
documents = SimpleDirectoryReader('data').load_data()
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
embed_model = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs
)
"""
from langchain.embeddings import HuggingFaceInstructEmbeddings
model_name = "hkunlp/instructor-large"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
embed_model = HuggingFaceInstructEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)
# g4f-backed chat model (gpt-3.5-turbo via the ChatgptAi provider), wrapped
# so llama_index can drive it through its LangChain adapter
llm: LLM = G4FLLM(
    model=models.gpt_35_turbo,
    provider=Provider.ChatgptAi,
)
llm = LangChainLLM(llm=llm)
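# Chunk to 512 tokens (the instructor models' input limit) and build an
# in-memory vector index over the documents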
service_context = ServiceContext.from_defaults(chunk_size=512, llm=llm, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
async def main(query):
    # Retrieve the chunks most similar to the query and let the LLM answer
    # grounded in them
    query_engine = index.as_query_engine()
    response = query_engine.query(query)
    print(response)
    return response
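# Text in, text out; Gradio awaits the async handler itself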
iface = gr.Interface(fn=main, inputs="text", outputs="text")
iface.launch()