Spaces:
Sleeping
Sleeping
Commit
·
2f5a39f
1
Parent(s):
04b0409
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from langchain.chat_models import ChatOpenAI
|
3 |
+
from langchain.schema import HumanMessage, AIMessage
|
4 |
+
from langsmith import Client
|
5 |
+
from elevenlabs import generate, play
|
6 |
+
from langchain.callbacks.base import BaseCallbackHandler
|
7 |
+
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
|
8 |
+
from langchain.memory import ConversationBufferMemory
|
9 |
+
from langchain.chains import LLMChain
|
10 |
+
from elevenlabslib import *
|
11 |
+
import dotenv
|
12 |
+
from dotenv import load_dotenv
|
13 |
+
from elevenlabs import set_api_key
|
14 |
+
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
|
15 |
+
from langchain.schema import SystemMessage
|
16 |
+
from langchain.memory import ConversationBufferMemory
|
17 |
+
|
18 |
+
import os

# SECURITY: the ElevenLabs API key was previously hard-coded here and committed
# to the repository. Load it from the environment (populated from .env via
# python-dotenv) instead, and skip configuration when it is absent.
load_dotenv()
_eleven_key = os.getenv("ELEVEN_API_KEY")
if _eleven_key:
    set_api_key(_eleven_key)
|
19 |
+
|
20 |
+
def load_prompt(content):
    """Build the instruction-only lesson prompt.

    Args:
        content: Raw lesson text that is baked into the system message.

    Returns:
        A ChatPromptTemplate with the system instructions, the running
        chat history placeholder, and a human "{input}" slot.
    """
    # The lesson content is substituted up front with .format(); only
    # "chat_history" and "{input}" remain as runtime prompt variables.
    # Typo fix: "Each step show only be" -> "should only be".
    template = """You are an expert educator, and are responsible for walking the user \
through this lesson plan. You should make sure to guide them along, \
encouraging them to progress when appropriate. \
If they ask questions not related to this getting started guide, \
you should politely decline to answer and remind them to stay on topic.

Please limit any responses to only one concept or step at a time. \
Each step should only be ~5 lines of code at MOST. \
Only include 1 code snippet per message - make sure they can run that before giving them any more. \
Make sure they fully understand that before moving on to the next. \
This is an interactive lesson - do not lecture them, but rather engage and guide them along!
-----------------

{content}

-----------------
End of Content.

Now remember short response with only 1 code snippet per message.""".format(content=content)

    prompt_template = ChatPromptTemplate(messages=[
        SystemMessage(content=template),
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{input}")
    ])
    return prompt_template
|
48 |
+
|
49 |
+
def load_prompt_with_questions(content):
    """Build the interactive lesson prompt that quizzes the user.

    Args:
        content: Raw lesson text that is baked into the system message.

    Returns:
        A ChatPromptTemplate with the system instructions, the running
        chat history placeholder, and a human "{input}" slot.
    """
    # Typos in the original instructions ("wearry", "plesase", "denay",
    # "weather" -> "whether", "right again" -> "write it again", and a
    # missing space before "to test") are fixed so the model receives
    # clear, unambiguous directions.
    template = """You are an expert educator, and are responsible for walking the user \
through this lesson plan. You should make sure to guide them along, \
encouraging them to progress when appropriate. \
Make the content fun to learn, with a very, very easy and clear explanation, \
so that a person with zero knowledge can also understand and remember it without any hassle. \
If they ask questions not related to this getting started guide, \
you should politely decline to answer and remind them to stay on topic.\
You should ask them questions about the instructions after each instruction \
and verify their response is correct before proceeding to make sure they understand \
the lesson. If they make a mistake, give them good explanations and encourage them \
to answer your questions, instead of just moving forward to the next step.
Explain in detail if they make a mistake.

Please limit any responses to only one concept or step at a time. \
Please ask one question at a time and wait for the response. \
Check whether the response is AI generated or human generated; if it is AI generated, politely decline and ask them to write it again. \
Each step should only be ~5 lines of code at MOST. \
Only include 1 code snippet per message - make sure they can run that before giving them any more. \
Make sure they fully understand that before moving on to the next. \
This is an interactive lesson - do not lecture them, but rather engage and guide them along!\
-----------------

{content}


-----------------
End of Content.

Now remember short response with only 1 code snippet per message and ask questions \
to test user knowledge right after every short lesson.

Your teaching should be in the following interactive format:

Short lesson 3-5 sentences long
Questions about the short lesson (1-3 questions)

Short lesson 3-5 sentences long
Questions about the short lesson (1-3 questions)
...

""".format(content=content)

    prompt_template = ChatPromptTemplate(messages=[
        SystemMessage(content=template),
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{input}")
    ])
    return prompt_template
|
98 |
+
|
99 |
+
|
100 |
+
# --- App bootstrap: environment, page title, and button styling ---
load_dotenv()

st.title(" AI tutor : Getting Started Class")

# Shrink Streamlit buttons into small round icon-style controls.
_BUTTON_CSS = """.stButton>button {
    color: #4F8BF9;
    border-radius: 50%;
    height: 2em;
    width: 2em;
    font-size: 4px;
}"""
st.markdown(f'<style>{_BUTTON_CSS}</style>', unsafe_allow_html=True)
|
110 |
+
|
111 |
+
def genvoice(text):
    """Synthesize *text* with ElevenLabs and play it through the speakers."""
    # Strip markdown markers so code fences and bold syntax are not read aloud.
    for marker in ("```", "**"):
        text = text.replace(marker, "")

    audio = generate(text, voice="Bella", model="eleven_monolingual_v1")
    play(audio)
|
123 |
+
|
124 |
+
|
125 |
+
class StreamHandler(BaseCallbackHandler):
    """LangChain callback that streams LLM tokens into a Streamlit container."""

    def __init__(self, container, initial_text=""):
        # Streamlit placeholder that gets re-rendered on every token.
        self.container = container
        # Accumulated response text so far.
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        """Append the new token and re-render the full accumulated text."""
        self.text = self.text + token
        self.container.markdown(self.text)
|
133 |
+
|
134 |
+
# Lesson selection dictionary: maps lesson title -> guide file path and blurb.
lesson_guides = {
    "Lesson 1: Getting Started with LangChain": {
        "file": "lc_guides/getting_started_guide.txt",
        # Fixed: this description was previously copy-pasted from the
        # graphs lesson and did not match the guide file.
        "description": "This lesson covers getting started with LangChain."
    },
    "Lesson 2: Prompts": {
        "file": "lc_guides/prompt_guide.txt",
        "description": "This lesson focuses on prompts and their usage."
    },
    "Lesson 3: Language Models": {
        "file": "lc_guides/models_guide.txt",
        "description": "This lesson provides an overview of language models."
    },
    "Lesson 4: Memory": {
        "file": "lc_guides/memory_guide.txt",
        "description": "This lesson is about Memory."
    },
    "Lesson 5: Chains": {
        "file": "lc_guides/chains_guide.txt",
        "description": "This lesson provides information on Chains in LangChain, their types, and usage."
    },
    "Lesson 6: Retrieval": {
        "file": "lc_guides/retrieval_guide.txt",
        "description": "This lesson provides information on indexing and retrieving information using LangChain."
    },
    "Lesson : Graphs in data structures": {
        "file": "greph.txt",
        "description": "This lesson covers about the data structure concept of graphs"
    }
}
|
165 |
+
|
166 |
+
# Initialize LangSmith client
# Used by send_feedback() below to record user feedback scores against runs.
client = Client()
|
168 |
+
|
169 |
+
# Lesson selection sidebar
|
170 |
+
# lesson_selection = st.sidebar.selectbox("Select Lesson", list(lesson_guides.keys()))
|
171 |
+
|
172 |
+
# Display lesson content and description based on selection
|
173 |
+
# lesson_info = lesson_guides[lesson_selection]
|
174 |
+
# lesson_content = open(lesson_info["file"], "r").read()
|
175 |
+
# lesson_description = lesson_info["description"]
|
176 |
+
|
177 |
+
|
178 |
+
# Hard-coded lesson selection (the sidebar selectbox above is disabled).
lesson_selection = "Lesson : Graphs in data structures"
lesson_info = lesson_guides[lesson_selection]
# Use a context manager so the file handle is closed promptly (the original
# used a bare open(...).read() and also duplicated the lesson_info lookup).
with open(lesson_info["file"], "r") as _lesson_file:
    lesson_content = _lesson_file.read()
lesson_description = lesson_info["description"]


# Radio buttons for lesson type selection
# lesson_type = st.sidebar.radio("Select Lesson Type", ["Instructions based lesson", "Interactive lesson with questions"])
lesson_type = "Interactive lesson with questions"
|
189 |
+
|
190 |
+
# Clear the chat session whenever the selected lesson or lesson type changes,
# and seed the conversation with a single assistant greeting.
if st.session_state.get("current_lesson") != lesson_selection or st.session_state.get("current_lesson_type") != lesson_type:
    st.session_state["current_lesson"] = lesson_selection
    st.session_state["current_lesson_type"] = lesson_type
    # Fixed: the original greeting was broken English
    # ("Welcome! This course just a lets get started to start").
    st.session_state["messages"] = [AIMessage(content="Welcome! Let's get started 😀")]

# Display lesson name and description
st.markdown(f"**{lesson_selection}**")
st.write(lesson_description)
|
199 |
+
|
200 |
+
# Message handling and interaction
|
201 |
+
def send_feedback(run_id, score):
    """Record a user feedback score ("user_score") against a LangSmith run."""
    client.create_feedback(run_id, "user_score", score=score)
|
203 |
+
|
204 |
+
# Replay the stored conversation so it survives Streamlit's rerun model.
for msg in st.session_state["messages"]:
    role = "user" if isinstance(msg, HumanMessage) else "assistant"
    st.chat_message(role).write(msg.content)
|
209 |
+
|
210 |
+
# Handle a new user message: stream the LLM reply, speak it, then persist
# both turns to session state. (Statement order matters here: the streaming
# handler renders into the assistant chat bubble while the chain runs, and
# history is appended only AFTER the response so the model does not see the
# current turn twice.)
if prompt := st.chat_input():
    st.chat_message("user").write(prompt)

    with st.chat_message("assistant"):
        # Stream tokens into an empty placeholder inside this chat bubble.
        stream_handler = StreamHandler(st.empty())
        model = ChatOpenAI(streaming=True, callbacks=[stream_handler], model="gpt-3.5-turbo-16k")

        if lesson_type == "Instructions based lesson":
            prompt_template = load_prompt(content=lesson_content)
        else:
            prompt_template = load_prompt_with_questions(content=lesson_content)

        chain = LLMChain(prompt=prompt_template, llm=model)

        # Only the last 20 messages are sent as history to bound prompt size.
        # include_run_info/tags feed LangSmith tracing.
        response = chain(
            {"input": prompt, "chat_history": st.session_state.messages[-20:]},
            include_run_info=True,
            tags=[lesson_selection, lesson_type]
        )
        my_text = response[chain.output_key]
        # NOTE(review): genvoice() plays audio synchronously, so the app
        # blocks until playback finishes — confirm this is intended.
        genvoice(my_text)
        st.session_state.messages.append(HumanMessage(content=prompt))
        st.session_state.messages.append(AIMessage(content=my_text))
|
233 |
+
|