# NOTE(review): removed scrape residue (file-size line, commit hashes, and a
# line-number gutter from a git blob viewer) that was not valid Python.
import gradio as gr
import os

# SECURITY: never hard-code an API key in source — the original line committed
# a real Google API key to the file. Read it from the environment instead;
# set GOOGLE_API_KEY in your shell before running this script.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY", "")
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    set_global_service_context,
)
from llama_index.llms import Gemini
from llama_index.embeddings import GeminiEmbedding

# Gemini chat model plus the matching embedding model used for indexing.
EMBED_MODEL_NAME = "models/embedding-001"
llm = Gemini()
embed_model = GeminiEmbedding(
    model_name=EMBED_MODEL_NAME,
    api_key=GOOGLE_API_KEY,
    title="this is a document",
)
# Load every PDF found in the current working directory.
pdf_reader = SimpleDirectoryReader(input_dir='./', required_exts=[".pdf"])
documents = pdf_reader.load_data()
# ServiceContext bundles the resources (LLM, embedding model, chunking
# settings) shared by the indexing and querying stages. Registering it
# globally lets later llama_index calls pick it up implicitly.
# NOTE(review): chunk_size=545 is unusual (512 is the common default) —
# confirm this value is intentional.
service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
    chunk_size=545,
)
set_global_service_context(service_context)
print("node passer11")
# A Node is a "chunk" of a source Document; split each loaded document
# into nodes using the service context's configured parser.
node_parser = service_context.node_parser
nodes = node_parser.get_nodes_from_documents(documents)
print("node passer")
# StorageContext offers core abstractions around storage of nodes,
# indices, and vectors; register the parsed nodes in its docstore.
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
print("node passer")
# Build the vector-store index over the raw documents.
# NOTE(review): this re-chunks and embeds `documents` even though the same
# content was already parsed into `nodes` above — possibly redundant work;
# confirm whether indexing `nodes` directly was intended.
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    llm=llm,
)
print("node passer")

# Query engine wraps the index for natural-language question answering.
query_engine = index.as_query_engine()
# Query the index
def greet(name):
    """Answer a user question via the RAG query engine.

    Args:
        name: The user's query text (the Gradio text-input value).

    Returns:
        The engine's answer as a plain string. The original returned the
        llama_index Response object itself; Gradio's "text" output expects
        a string, so we convert explicitly.
    """
    response = query_engine.query(name)
    print(response)  # debug: log the raw response to stdout
    return str(response)
# Wire the query function into a simple text-in/text-out web UI.
# (Fixed: the original final line ended with a stray " |" — scrape residue
# that made the file a syntax error.)
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()