import chainlit as cl
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.chains import LLMChain
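
# Note: these imports match the older LangChain package layout this exercise
# was written against; in newer releases ChatOpenAI lives in the
# langchain_openai package and LLMChain is deprecated in favor of LCEL.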

@cl.on_chat_start
async def on_chat_start():
    ##########################################################################
    # Exercise 1a:
    # Our Chainlit app should initialize the LLM chat via LangChain at the
    # start of a chat session.
    #
    # First, we need to choose an LLM from OpenAI's list of models. Remember
    # to set streaming=True so that tokens are streamed back as they are
    # generated.
    ##########################################################################
    model = ChatOpenAI(
        model="gpt-4-1106-preview",
        streaming=True
    )
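    # Note: any chat-capable OpenAI model id can be used here;
    # "gpt-4-1106-preview" is just one choice. With streaming=True, the model
    # emits tokens incrementally so Chainlit can render them as they arrive.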

    ##########################################################################
    # Exercise 1b:
    # Next, we need to set the prompt template for the chat. Prompt templates
    # are how we define prompts and then inject information into them.
    #
    # Please create the prompt template using ChatPromptTemplate. Use the
    # variable name "question" as the variable in the template.
    # Refer to the documentation listed in the README.md file for reference.
    ##########################################################################
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are Chainlit GPT, a helpful assistant.",
            ),
            (
                "human",
                "{question}"
            ),
        ]
    )
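
    # Sanity check (illustrative, not part of the exercise): calling
    # prompt.format_messages(question="Hello!") renders the template into a
    # SystemMessage followed by a HumanMessage containing "Hello!".
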
    ##########################################################################
    # Exercise 1c:
    # Now that we have a model and a prompt, let's build our Chain. A Chain
    # is one or a series of LLM calls. We will use the default
    # StrOutputParser to parse the LLM outputs.
    ##########################################################################
    chain = LLMChain(llm=model, prompt=prompt, output_parser=StrOutputParser())

    # We save the chain in user_session (which is scoped to the current chat
    # session), so we do not have to rebuild it on every message.
    cl.user_session.set("chain", chain)


@cl.on_message
async def main(message: cl.Message):

    # Let's load the chain from user_session
    chain = cl.user_session.get("chain")  # type: LLMChain

    ##########################################################################
    # Exercise 1d:
    # Every time we receive a new user message, we get the chain from
    # user_session, run it with the user's question, and return the LLM's
    # response to the user.
    ##########################################################################
    response = await chain.arun(
        question=message.content, callbacks=[cl.LangchainCallbackHandler()]
    )

    await cl.Message(content=response).send()
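
# Note: a minimal token-streaming sketch (an assumption, using LangChain's
# LCEL composition from newer releases instead of LLMChain; you would build
# `prompt | model | StrOutputParser()` in on_chat_start, save it in
# user_session as `runnable`, and then stream inside main):
#
#     msg = cl.Message(content="")
#     async for token in runnable.astream({"question": message.content}):
#         await msg.stream_token(token)
#     await msg.send()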