import gradio as gr
import os
import logging
import sys
# Log to stdout; a single handler is enough (adding another StreamHandler would duplicate every line)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
GOOGLE_API_KEY = "YOUR_GOOGLE_API_KEY"  # add your GOOGLE API key here; never commit a real key
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from g4f import models
from llama_index.llms.langchain import LangChainLLM
from langchain_g4f import G4FLLM
from llama_index.core import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    set_global_service_context,
)
#from llama_index.llms import Gemini
from llama_index.embeddings.gemini import GeminiEmbedding
import g4f
g4f.debug.logging = True
from llama_index.core import Settings
llm = G4FLLM(
    model=models.gpt_35_turbo_16k,
)
# Wrap the g4f LLM so llama_index can drive it through its LangChain adapter
llm = LangChainLLM(llm=llm)
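# Optional smoke test for the wrapped LLM (a sketch; uncomment to make a live call):
# print(llm.complete("Say hello"))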
model_name = "models/embedding-001"
#llm = Gemini()
embed_model = GeminiEmbedding(
    model_name=model_name, api_key=GOOGLE_API_KEY, title="this is a document"
)
Settings.embed_model = embed_model
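# Optional sanity check for the embedding model (a sketch; assumes network
# access and a valid key; uncomment to verify before indexing):
# sample_vector = embed_model.get_text_embedding("hello world")
# print(f"Embedding dimension: {len(sample_vector)}")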
# Read PDFs from the "./data" directory
documents = SimpleDirectoryReader(
    input_dir="data",
    required_exts=[".pdf"],
).load_data()
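# Quick visibility into what was loaded (assumes ./data exists and holds at least one PDF)
print(f"Loaded {len(documents)} document(s) from ./data")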
# ServiceContext bundles commonly used resources
# (LLM, embedding model, node parser) for the
# indexing and querying stages
service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
    chunk_size=545,
)
set_global_service_context(service_context)
print("node passer11")
# Node represents a “chunk” of a source Document
nodes = service_context.node_parser.get_nodes_from_documents(documents)
print("node passer")
# StorageContext offers core abstractions around storage of nodes,
# indices, and vectors
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
print("node passer")
# Create the vector store index; the LLM and embedding model come from
# the global service context set above
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)
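# Optional: persist the index to disk so it can be reloaded without
# re-embedding (a sketch; "storage" is a hypothetical directory name):
# index.storage_context.persist(persist_dir="storage")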
print("node passer")
query_engine = index.as_query_engine()
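# Example of querying outside the UI (a sketch; the question is illustrative):
# print(query_engine.query("What topics do these PDFs cover?"))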
# Query the index with the user's question and return the answer text
def answer_query(question):
    response = query_engine.query(question)
    print(response)
    return str(response)

iface = gr.Interface(fn=answer_query, inputs="text", outputs="text")
iface.launch()