Update app.py
app.py CHANGED
@@ -30,14 +30,14 @@ g4f.debug.logging = True
 from llama_index.core import Settings
 from langchain_google_genai import ChatGoogleGenerativeAI
 
-
+
 llm= LLM = G4FLLM(
     model=models.gpt_35_turbo_16k,
 )
 
 llm = LangChainLLM(llm=llm)
 
-
+
 
 safe = [
     {
@@ -60,7 +60,7 @@ safe = [
 
 
 
-llm = Gemini(model="models/gemini-pro", safety_settings=safe)
+#llm = Gemini(model="models/gemini-pro", safety_settings=safe)
 
 model_name = "models/embedding-001"
 
@@ -102,7 +102,7 @@ service_context = (
     .from_defaults(
         llm=llm,
         embed_model=embed_model,
-        chunk_size=
+        chunk_size=8045
     )
 )
 set_global_service_context(service_context)
@@ -129,12 +129,13 @@ index = (
     )
 )
 print("node passer")
-
+
 
 # Query the index
 
 
 def greet(name):
+    query_engine = index.as_query_engine()
     response = query_engine.query(name)
 
     print("question :",name)
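For context, the pieces this commit touches fit together roughly as below. This is a minimal sketch, not the Space's actual app.py: it assumes a llama-index 0.9.x-style install where ServiceContext and set_global_service_context are still available, assumes GeminiEmbedding as the wrapper for the "models/embedding-001" name seen in the diff, and the "data" corpus path and the return value are illustrative placeholders.

from g4f import models
from langchain_g4f import G4FLLM
from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
    set_global_service_context,
)
from llama_index.embeddings import GeminiEmbedding  # assumed embedding class
from llama_index.llms import LangChainLLM

# Same LLM wiring as the diff: a g4f-backed LangChain LLM wrapped for llama-index.
llm = LangChainLLM(llm=G4FLLM(model=models.gpt_35_turbo_16k))

# "models/embedding-001" appears in the diff; the GeminiEmbedding class is an assumption.
embed_model = GeminiEmbedding(model_name="models/embedding-001")

# The chunk_size hunk: the old line stopped at "chunk_size=", a SyntaxError;
# the commit fills in 8045.
service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
    chunk_size=8045,
)
set_global_service_context(service_context)

# "data" is a placeholder corpus path, not taken from the diff.
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)

def greet(name):
    # The last hunk's fix: build the query engine before using it, so
    # query_engine is actually defined when greet() runs.
    query_engine = index.as_query_engine()
    response = query_engine.query(name)
    print("question :", name)
    return str(response)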