Docfile committed on
Commit 999e17e · 1 Parent(s): 5a48709

Create app.py

Files changed (1)
  app.py  +48 -0
app.py ADDED
@@ -0,0 +1,48 @@
+ # Gradio app: answer questions over the documents in data/ with llama_index,
+ # using a g4f-backed GPT-3.5 model exposed to LangChain via langchain_g4f.
+ import gradio as gr
+ import nest_asyncio
+ from g4f import Provider, models
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.llms.base import LLM
+ from langchain_g4f import G4FLLM
+ from llama_index import (
+     GPTVectorStoreIndex,
+     PromptHelper,
+     ServiceContext,
+     SimpleDirectoryReader,
+ )
+ from llama_index.llms import LangChainLLM
+ from llama_index.node_parser import SimpleNodeParser
+ from llama_index.text_splitter import TokenTextSplitter
+
+ # Allow re-entrant event loops so the async g4f calls work inside Gradio.
+ nest_asyncio.apply()
+
+ # Local sentence-transformers model used for document embeddings.
+ embed_model = HuggingFaceEmbeddings(
+     model_name="sentence-transformers/all-mpnet-base-v2"
+ )
+ node_parser = SimpleNodeParser.from_defaults(
+     text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
+ )
+ prompt_helper = PromptHelper(
+     context_window=4096,
+     num_output=256,
+     chunk_overlap_ratio=0.1,
+     chunk_size_limit=None,
+ )
+
+
+ async def main(question):
+     # Wrap the free GPT-3.5 provider as a LangChain LLM, then adapt it for llama_index.
+     llm: LLM = G4FLLM(
+         model=models.gpt_35_turbo,
+         provider=Provider.DeepAi,
+     )
+     llm = LangChainLLM(llm=llm)
+
+     service_context = ServiceContext.from_defaults(
+         llm=llm,
+         embed_model=embed_model,
+         node_parser=node_parser,
+         prompt_helper=prompt_helper,
+     )
+
+     # Build an in-memory vector index over data/ and answer the question from it.
+     documents = SimpleDirectoryReader("data/").load_data()
+     index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
+     query_engine = index.as_query_engine(service_context=service_context)
+     response = query_engine.query(question)
+     return str(response)
+
+
+ iface = gr.Interface(fn=main, inputs="text", outputs="text")
+ iface.launch()
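
Once the app is running, the text endpoint that gr.Interface exposes can also be queried programmatically. The snippet below is a usage sketch, not part of this commit: it assumes the gradio_client package is installed and that the app is reachable at Gradio's default local address.

from gradio_client import Client

# Assumes the app above is running locally on Gradio's default port.
client = Client("http://127.0.0.1:7860")

# "/predict" is the default endpoint name for a single-function gr.Interface.
answer = client.predict("What are these documents about?", api_name="/predict")
print(answer)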