Update app.py
app.py CHANGED
@@ -1,10 +1,15 @@
 import os
 import gradio as gr
-from
+from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain import LLMChain, PromptTemplate
 from langchain.memory import ConversationBufferMemory
 
-
+
+import getpass
+import os
+
+
+os.environ["GOOGLE_API_KEY"] = "AIzaSyBbruzn10nez-0a-_60TA9R9h6qumLD1Es"
 
 template = """You are a helpful assistant to answer all user queries.
 {chat_history}
@@ -18,7 +23,7 @@ prompt = PromptTemplate(
 memory = ConversationBufferMemory(memory_key="chat_history")
 
 llm_chain = LLMChain(
-    llm=
+    llm=ChatGoogleGenerativeAI(temperature='0.5', model_name="gemini-pro"),
     prompt=prompt,
     verbose=True,
     memory=memory,
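
The commit imports getpass but never calls it, and it commits the API key in plaintext instead. For reference, a minimal sketch of the pattern the LangChain docs usually suggest (reading the key from a Space secret / the environment, with getpass as an interactive fallback); this is not what the commit actually does:

import getpass
import os

# Prefer a key injected as a Hugging Face Space secret (already present in
# os.environ); fall back to an interactive prompt rather than hard-coding it.
if "GOOGLE_API_KEY" not in os.environ:
    os.environ["GOOGLE_API_KEY"] = getpass.getpass("Enter Google API key: ")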
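
The hunks only cover the imports and the chain construction; the Gradio side of app.py is not shown in this diff. As a rough sketch of how such a chain is typically wired to a chat UI (the callback name, the gr.ChatInterface usage, and the "user_message" input variable are assumptions, not part of this commit):

import gradio as gr

# Sketch only: assumes llm_chain is the LLMChain built above and that the
# prompt's second input variable is named "user_message" (not visible here).
def respond(user_message, history):
    # ConversationBufferMemory fills {chat_history} between calls, so only the
    # new user message needs to be passed in.
    return llm_chain.predict(user_message=user_message)

gr.ChatInterface(respond).launch()

Note that recent langchain-google-genai releases expect ChatGoogleGenerativeAI(model=..., temperature=...) with a float temperature rather than model_name= and a string, so the added llm= line may need that adjustment to run.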