import os

import streamlit as st
from dotenv import load_dotenv

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, AIMessage, SystemMessage
from langchain.callbacks.base import BaseCallbackHandler
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate
from langchain.chains import LLMChain
from langsmith import Client
from elevenlabs import generate, play, set_api_key

# Configure the ElevenLabs API key from the environment
# (assumes an ELEVENLABS_API_KEY entry in .env; a key was previously hard-coded here).
load_dotenv()
set_api_key(os.getenv("ELEVENLABS_API_KEY"))

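# Build the system prompt for the instructions-only lesson mode: the model walks the
# user through the lesson content one small step (and one code snippet) at a time.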
def load_prompt(content):

	template = """You are an expert educator, and are responsible for walking the user \
	through this lesson plan. You should make sure to guide them along, \
	encouraging them to progress when appropriate. \
	If they ask questions not related to this getting started guide, \
	you should politely decline to answer and remind them to stay on topic.

	Please limit any responses to only one concept or step at a time. \
	Each step should only be ~5 lines of code at MOST. \
	Only include 1 code snippet per message - make sure they can run that before giving them any more. \
	Make sure they fully understand that before moving on to the next. \
	This is an interactive lesson - do not lecture them, but rather engage and guide them along!
	-----------------

	{content}
	
	-----------------
	End of Content.

	Now remember: keep responses short, with only 1 code snippet per message.""".format(content=content)

	prompt_template = ChatPromptTemplate(messages = [
		SystemMessage(content=template), 
		MessagesPlaceholder(variable_name="chat_history"), 
		HumanMessagePromptTemplate.from_template("{input}")
		])
	return prompt_template

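# Build the system prompt for the interactive lesson mode: same guidance as above, but
# the model also quizzes the user after every short lesson before moving on.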
def load_prompt_with_questions(content):

	template = """You are an expert educator, and are responsible for walking the user \
	through this lesson plan. You should make sure to guide them along, \
	encouraging them to progress when appropriate. \
	Make the content fun to learn, and keep the explanations very easy and clear, so that a person with zero prior knowledge can understand and remember it without any hassle. \
	If they ask questions not related to this getting started guide, \
	you should politely decline to answer and remind them to stay on topic.\
	You should ask them questions about the instructions after each instruction \
	and verify their response is correct before proceeding to make sure they understand \
	the lesson. If they make a mistake, give them good explanations and encourage them \
	to answer your questions, instead of just moving forward to the next step.
	Explain things to them in detail if they make a mistake.

	Please limit any responses to only one concept or step at a time. \
	Please ask one question at a time and wait for the response. \
	Check whether the response is AI-generated or human-written; if it appears AI-generated, politely decline it and ask them to write it again. \
	Each step should only be ~5 lines of code at MOST. \
	Only include 1 code snippet per message - make sure they can run that before giving them any more. \
	Make sure they fully understand that before moving on to the next. \
	This is an interactive lesson - do not lecture them, but rather engage and guide them along!\
	-----------------

	{content}


	-----------------
	End of Content.

	Now remember: keep responses short, with only 1 code snippet per message, and ask questions\
	to test the user's knowledge right after every short lesson.
	
	Your teaching should be in the following interactive format:
	
	Short lesson 3-5 sentences long
	Questions about the short lesson (1-3 questions)

	Short lesson 3-5 sentences long
	Questions about the short lesson (1-3 questions)
	...

	 """.format(content=content)

	prompt_template = ChatPromptTemplate(messages = [
		SystemMessage(content=template), 
		MessagesPlaceholder(variable_name="chat_history"), 
		HumanMessagePromptTemplate.from_template("{input}")
		])
	return prompt_template


load_dotenv()
st.title("AI Tutor: Getting Started Class")
button_css = """.stButton>button {
    color: #4F8BF9;
    border-radius: 50%;
    height: 2em;
    width: 2em;
    font-size: 4px;
}"""
st.markdown(f'<style>{button_css}</style>', unsafe_allow_html=True)

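# Convert the assistant's reply to speech with ElevenLabs and play it; markdown
# markers are stripped first so they are not read aloud.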
def genvoice(text):
    text = text.replace("```", "")
    text = text.replace("**", "")

    audio = generate(text, voice="Bella", model="eleven_monolingual_v1")
    play(audio)


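# Streams LLM tokens into a Streamlit placeholder as they arrive, so the reply
# renders incrementally instead of all at once.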
class StreamHandler(BaseCallbackHandler):
    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.text += token
        self.container.markdown(self.text)

# Lesson selection dictionary
lesson_guides = {
    "Lesson 1: Getting Started with LangChain": {
        "file": "lc_guides/getting_started_guide.txt",
        "description": "This lesson covers the basics of getting started with LangChain."
    },
    "Lesson 2: Prompts": {
        "file": "lc_guides/prompt_guide.txt",
        "description": "This lesson focuses on prompts and their usage."
    },
    "Lesson 3: Language Models": {
        "file": "lc_guides/models_guide.txt",
        "description": "This lesson provides an overview of language models."
    },
    "Lesson 4: Memory": {
        "file": "lc_guides/memory_guide.txt",
        "description": "This lesson is about Memory."
    },
    "Lesson 5: Chains": {
        "file": "lc_guides/chains_guide.txt",
        "description": "This lesson provides information on Chains in LangChain, their types, and usage."
    },
    "Lesson 6: Retrieval": {
        "file": "lc_guides/retrieval_guide.txt",
        "description": "This lesson provides information on indexing and retrieving information using LangChain."
    },
    "Lesson : Graphs in data structures": {
        "file": "greph.txt",
        "description": "This lesson covers the data structure concept of graphs."
    }
}

# Initialize LangSmith client
client = Client()

# Lesson selection sidebar
# lesson_selection = st.sidebar.selectbox("Select Lesson", list(lesson_guides.keys()))

# Display lesson content and description based on selection
# lesson_info = lesson_guides[lesson_selection]
# lesson_content = open(lesson_info["file"], "r").read()
# lesson_description = lesson_info["description"]


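# The lesson is currently hard-coded to the graphs guide; uncomment the sidebar
# widgets above to let the user choose a lesson.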
lesson_selection = "Lesson : Graphs in data structures"
lesson_info = lesson_guides[lesson_selection]
lesson_content = open(lesson_info["file"], "r").read()
lesson_description = lesson_info["description"]



# Radio buttons for lesson type selection
# lesson_type = st.sidebar.radio("Select Lesson Type", ["Instructions based lesson", "Interactive lesson with questions"])
lesson_type = "Interactive lesson with questions"

# Clear chat session if dropdown option or radio button changes
if st.session_state.get("current_lesson") != lesson_selection or st.session_state.get("current_lesson_type") != lesson_type:
    st.session_state["current_lesson"] = lesson_selection
    st.session_state["current_lesson_type"] = lesson_type
    st.session_state["messages"] = [AIMessage(content="Welcome! Let's get started with this course 😀")]

# Display lesson name and description
st.markdown(f"**{lesson_selection}**")
st.write(lesson_description)

# Message handling and interaction
def send_feedback(run_id, score):
    client.create_feedback(run_id, "user_score", score=score)

for msg in st.session_state["messages"]:
    if isinstance(msg, HumanMessage):
        st.chat_message("user").write(msg.content)
    else:
        st.chat_message("assistant").write(msg.content)

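# Main chat loop: take the user's message, stream the model's reply, speak it aloud,
# and persist both messages in the session state.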
if prompt := st.chat_input():
    st.chat_message("user").write(prompt)

    with st.chat_message("assistant"):
        stream_handler = StreamHandler(st.empty())
        model = ChatOpenAI(streaming=True, callbacks=[stream_handler], model="gpt-3.5-turbo-16k")

        if lesson_type == "Instructions based lesson":
            prompt_template = load_prompt(content=lesson_content)
        else:
            prompt_template = load_prompt_with_questions(content=lesson_content)

        chain = LLMChain(prompt=prompt_template, llm=model)

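        # Run the chain on the user's input, passing the last 20 messages as chat history;
        # include_run_info=True attaches LangSmith run metadata to the response.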
        response = chain(
            {"input": prompt, "chat_history": st.session_state.messages[-20:]},
            include_run_info=True,
            tags=[lesson_selection, lesson_type]
        )
        my_text = response[chain.output_key]
        genvoice(my_text)
        st.session_state.messages.append(HumanMessage(content=prompt))
        st.session_state.messages.append(AIMessage(content=my_text))