Update app.py
app.py CHANGED
@@ -5,6 +5,10 @@ from g4f import Provider, models
 from langchain.llms.base import LLM
 
 from langchain_g4f import G4FLLM
+g4f.debug.logging = True  # Enable logging
+g4f.check_version = False  # Disable automatic version checking
+print(g4f.version)  # Check version
+print(g4f.Provider.Ails.params)  # Supported args
 
 
 url = "https://app.embedchain.ai/api/v1/pipelines/f14b3df8-db63-456c-8a7f-4323b4467271/context/"
@@ -26,8 +30,7 @@ def greet(name):
     print(name)
     c = response.text
     llm = LLM = G4FLLM(
-        model=models.gpt_35_turbo
-        provider=Provider.GeekGpt,
+        model=models.gpt_35_turbo
     )
 
     res = llm(f"""
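
For reference, a minimal sketch of the module-level setup after this commit, together with the simplified G4FLLM construction from greet(). The import g4f line and the sample prompt are assumptions added for illustration (only the hunk context is visible in the diff); dropping provider=Provider.GeekGpt means g4f falls back to its own provider selection for gpt-3.5-turbo.

import g4f  # assumed import; needed so the new g4f.debug and g4f.check_version lines resolve
from g4f import Provider, models
from langchain.llms.base import LLM
from langchain_g4f import G4FLLM

g4f.debug.logging = True  # Enable logging
g4f.check_version = False  # Disable automatic version checking
print(g4f.version)  # Check version
print(g4f.Provider.Ails.params)  # Supported args

# Inside greet(), the LLM is now built without an explicit provider:
llm = LLM = G4FLLM(  # mirrors the source, which also rebinds the imported LLM name
    model=models.gpt_35_turbo,
)
print(llm("Hello, how are you?"))  # hypothetical prompt; the real f-string prompt is truncated in the diff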