Add chainlit jp
- .chainlit/translations/ja.json +230 -0
- app_chainlit-2nd.py +15 -0
- app_chainlit-3rd.py +39 -0
- app_chainlit.py +97 -9
.chainlit/translations/ja.json
ADDED
@@ -0,0 +1,230 @@
{
  "components": {
    "atoms": {
      "buttons": {
        "userButton": {
          "menu": {
            "settings": "設定",
            "settingsKey": "S",
            "APIKeys": "APIキー",
            "logout": "ログアウト"
          }
        }
      }
    },
    "molecules": {
      "newChatButton": {
        "newChat": "新しいチャット"
      },
      "tasklist": {
        "TaskList": {
          "title": "\ud83d\uddd2\ufe0f タスクリスト",
          "loading": "読み込み中...",
          "error": "エラーが発生しました"
        }
      },
      "attachments": {
        "cancelUpload": "アップロードをキャンセル",
        "removeAttachment": "添付ファイルを削除"
      },
      "newChatDialog": {
        "createNewChat": "新しいチャットを作成しますか?",
        "clearChat": "現在のメッセージをクリアして新しいチャットを開始します。",
        "cancel": "キャンセル",
        "confirm": "確認"
      },
      "settingsModal": {
        "settings": "設定",
        "expandMessages": "メッセージを展開する",
        "hideChainOfThought": "思考の連鎖を隠す",
        "darkMode": "ダークモード"
      },
      "detailsButton": {
        "using": "処理中",
        "used": ""
      },
      "auth": {
        "authLogin": {
          "title": "アプリにアクセスするにはログインしてください。",
          "form": {
            "email": "メールアドレス",
            "password": "パスワード",
            "noAccount": "アカウントを持っていませんか?",
            "alreadyHaveAccount": "すでにアカウントをお持ちですか?",
            "signup": "サインアップ",
            "signin": "サインイン",
            "or": "または",
            "continue": "ログイン",
            "forgotPassword": "パスワードをお忘れですか?",
            "passwordMustContain": "パスワードには以下が含まれている必要があります:",
            "emailRequired": "メールアドレスは必須項目です",
            "passwordRequired": "パスワードは必須項目です"
          },
          "error": {
            "default": "サインインできません。",
            "signin": "異なるアカウントでサインインしてみてください。",
            "oauthsignin": "異なるアカウントでサインインしてみてください。",
            "redirect_uri_mismatch": "リダイレクトURIがOAuthアプリの設定と一致していません。",
            "oauthcallbackerror": "異なるアカウントでサインインしてみてください。",
            "oauthcreateaccount": "異なるアカウントでサインインしてみてください。",
            "emailcreateaccount": "異なるアカウントでサインインしてみてください。",
            "callback": "異なるアカウントでサインインしてみてください。",
            "oauthaccountnotlinked": "本人確認のため、元々使用していたアカウントでサインインしてください。",
            "emailsignin": "メールの送信に失敗しました。",
            "emailverify": "メールアドレスを確認してください。新しいメールが送信されました。",
            "credentialssignin": "サインインに失敗しました。提供した情報が正しいか確認してください。",
            "sessionrequired": "このページにアクセスするにはサインインしてください。"
          }
        },
        "authVerifyEmail": {
          "almostThere": "もう少しです!メールを送信しました:",
          "verifyEmailLink": "メール内のリンクをクリックしてサインアップを完了してください。",
          "didNotReceive": "メールが見つかりませんか?",
          "resendEmail": "メールを再送信",
          "goBack": "戻る",
          "emailSent": "メールが正常に送信されました。",
          "verifyEmail": "メールアドレスを確認してください"
        },
        "providerButton": {
          "continue": "{{provider}}で続行",
          "signup": "{{provider}}でサインアップ"
        },
        "authResetPassword": {
          "newPasswordRequired": "新しいパスワードは必須項目です",
          "passwordsMustMatch": "パスワードが一致しません",
          "confirmPasswordRequired": "確認用パスワードは必須項目です",
          "newPassword": "新しいパスワード",
          "confirmPassword": "確認用パスワード",
          "resetPassword": "パスワードをリセット"
        },
        "authForgotPassword": {
          "email": "メールアドレス",
          "emailRequired": "メールアドレスは必須項目です",
          "emailSent": "{{email}}宛にパスワードをリセットするための手順が記載されたメールを確認してください。",
          "enterEmail": "メールアドレスを入力してください。パスワードをリセットする手順をお送りします。",
          "resendEmail": "メールを再送信",
          "continue": "続行",
          "goBack": "戻る"
        }
      }
    },
    "organisms": {
      "chat": {
        "history": {
          "index": {
            "showHistory": "履歴を表示",
            "lastInputs": "最後の入力",
            "noInputs": "データがありません...",
            "loading": "読み込み中..."
          }
        },
        "inputBox": {
          "input": {
            "placeholder": "ここにメッセージを入力..."
          },
          "speechButton": {
            "start": "録音を開始",
            "stop": "録音を停止"
          },
          "SubmitButton": {
            "sendMessage": "メッセージを送信",
            "stopTask": "タスクを停止"
          },
          "UploadButton": {
            "attachFiles": "ファイルを添付"
          },
          "waterMark": {
            "text": "Built with"
          }
        },
        "Messages": {
          "index": {
            "running": "実行中",
            "executedSuccessfully": "正常に実行されました",
            "failed": "失敗",
            "feedbackUpdated": "フィードバックが更新されました",
            "updating": "更新中"
          }
        },
        "dropScreen": {
          "dropYourFilesHere": "ここにファイルをドロップしてください"
        },
        "index": {
          "failedToUpload": "アップロードに失敗しました",
          "cancelledUploadOf": "次のファイルのアップロードをキャンセルしました:",
          "couldNotReachServer": "サーバーに接続できませんでした",
          "continuingChat": "前回のチャットを続ける"
        },
        "settings": {
          "settingsPanel": "設定パネル",
          "reset": "リセット",
          "cancel": "キャンセル",
          "confirm": "確認"
        }
      },
      "threadHistory": {
        "sidebar": {
          "filters": {
            "FeedbackSelect": {
              "feedbackAll": "フィードバック:全て",
              "feedbackPositive": "フィードバック:ポジティブ",
              "feedbackNegative": "フィードバック:ネガティブ"
            },
            "SearchBar": {
              "search": "検索"
            }
          },
          "DeleteThreadButton": {
            "confirmMessage": "このスレッドとそのメッセージおよび要素を削除します。",
            "cancel": "キャンセル",
            "confirm": "確認",
            "deletingChat": "チャットを削除中",
            "chatDeleted": "チャットが削除されました"
          },
          "index": {
            "pastChats": "過去のチャット"
          },
          "ThreadList": {
            "empty": "空です...",
            "today": "今日",
            "yesterday": "昨日",
            "previous7days": "過去7日間",
            "previous30days": "過去30日間"
          },
          "TriggerButton": {
            "closeSidebar": "サイドバーを閉じる",
            "openSidebar": "サイドバーを開く"
          }
        },
        "Thread": {
          "backToChat": "チャットに戻る",
          "chatCreatedOn": "チャット作成日"
        }
      },
      "header": {
        "chat": "チャット",
        "readme": "Readme"
      }
    }
  },
  "hooks": {
    "useLLMProviders": {
      "failedToFetchProviders": "プロバイダーの取得に失敗しました:"
    }
  },
  "pages": {
    "Design": {},
    "Env": {
      "savedSuccessfully": "保存が成功しました",
      "requiredApiKeys": "必要なAPIキー",
      "requiredApiKeysInfo": "このアプリを使用するには、以下のAPIキーが必要です。キーはデバイスのローカルストレージに保存されます。"
    },
    "Page": {
      "notPartOfProject": "このプロジェクトの一部ではありません。"
    },
    "ResumeButton": {
      "resumeChat": "チャットを再開"
    }
  }
}
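Chainlit reads per-language UI strings from the .chainlit/translations/ directory, so this ja.json should localize the default interface labels (buttons, auth screens, thread history, and so on) for Japanese users; the keys appear to mirror Chainlit's built-in English translation file.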
app_chainlit-2nd.py
ADDED
@@ -0,0 +1,15 @@
from dotenv import load_dotenv
load_dotenv()
import chainlit as cl

from langchain_openai import ChatOpenAI

# We will set streaming=True so that we can stream tokens
model = ChatOpenAI(temperature=0, streaming=True)

from langchain_core.messages import HumanMessage

@cl.on_message
async def run_convo(message: cl.Message):
    response = model.invoke([HumanMessage(content=message.content)])
    await cl.Message(content=response.content).send()
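This minimal handler can be served locally with the Chainlit CLI, for example chainlit run app_chainlit-2nd.py -w (assuming the chainlit and langchain-openai packages are installed and OPENAI_API_KEY is provided via the .env file that load_dotenv() reads).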
app_chainlit-3rd.py
ADDED
@@ -0,0 +1,39 @@
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from typing import cast

import chainlit as cl


@cl.on_chat_start
async def on_chat_start():
    model = ChatOpenAI(streaming=True)
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You're a very knowledgeable historian who provides accurate and eloquent answers to historical questions.",
            ),
            ("human", "{question}"),
        ]
    )
    runnable = prompt | model | StrOutputParser()
    cl.user_session.set("runnable", runnable)


@cl.on_message
async def on_message(message: cl.Message):
    runnable = cast(Runnable, cl.user_session.get("runnable"))  # type: Runnable

    msg = cl.Message(content="")

    async for chunk in runnable.astream(
        {"question": message.content},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)

    await msg.send()
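For reference, the prompt | model | StrOutputParser() chain built in on_chat_start can also be exercised outside Chainlit; a minimal sketch, assuming OPENAI_API_KEY is set (the sample question is only an illustration):

from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser

# Same chain as above, called synchronously and without streaming;
# the Chainlit handler streams the identical chain with astream().
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You're a very knowledgeable historian who provides accurate and eloquent answers to historical questions."),
        ("human", "{question}"),
    ]
)
chain = prompt | ChatOpenAI() | StrOutputParser()
print(chain.invoke({"question": "Who commissioned the Hagia Sophia?"}))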
app_chainlit.py
CHANGED
@@ -1,15 +1,103 @@
from typing import Literal
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode
from langchain.schema.runnable.config import RunnableConfig
from langchain_core.messages import HumanMessage

import chainlit as cl


@tool
def get_weather(city: Literal["nyc", "sf"]):
    """Use this to get weather information."""
    if city == "nyc":
        return "It might be cloudy in nyc"
    elif city == "sf":
        return "It's always sunny in sf"
    else:
        raise AssertionError("Unknown city")


tools = [get_weather]
model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
final_model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

model = model.bind_tools(tools)
# NOTE: this is where we add a tag that we can use later to filter the model stream events
# to only the model called in the final node. This is not necessary if you call a single LLM,
# but can be important if you call multiple models within the node and want to filter events
# from only one of them.
final_model = final_model.with_config(tags=["final_node"])
tool_node = ToolNode(tools=tools)

from typing import Annotated
from typing_extensions import TypedDict

from langgraph.graph import END, StateGraph, START
from langgraph.graph.message import MessagesState
from langchain_core.messages import BaseMessage, SystemMessage, HumanMessage


def should_continue(state: MessagesState) -> Literal["tools", "final"]:
    messages = state["messages"]
    last_message = messages[-1]
    # If the LLM makes a tool call, then we route to the "tools" node
    if last_message.tool_calls:
        return "tools"
    # Otherwise, we stop (reply to the user)
    return "final"


def call_model(state: MessagesState):
    messages = state["messages"]
    response = model.invoke(messages)
    # We return a list, because this will get added to the existing list
    return {"messages": [response]}


def call_final_model(state: MessagesState):
    messages = state["messages"]
    last_ai_message = messages[-1]
    response = final_model.invoke(
        [
            SystemMessage("Rewrite this in the voice of Al Roker"),
            HumanMessage(last_ai_message.content),
        ]
    )
    # overwrite the last AI message from the agent
    response.id = last_ai_message.id
    return {"messages": [response]}


builder = StateGraph(MessagesState)

builder.add_node("agent", call_model)
builder.add_node("tools", tool_node)
# add a separate final node
builder.add_node("final", call_final_model)

builder.add_edge(START, "agent")
builder.add_conditional_edges(
    "agent",
    should_continue,
)

builder.add_edge("tools", "agent")
builder.add_edge("final", END)

graph = builder.compile()


@cl.on_message
async def on_message(msg: cl.Message):
    config = {"configurable": {"thread_id": cl.context.session.id}}
    cb = cl.LangchainCallbackHandler()
    final_answer = cl.Message(content="")

    for msg, metadata in graph.stream(
        {"messages": [HumanMessage(content=msg.content)]},
        stream_mode="messages",
        config=RunnableConfig(callbacks=[cb], **config),
    ):
        if (
            msg.content
            and not isinstance(msg, HumanMessage)
            and metadata["langgraph_node"] == "final"
        ):
            await final_answer.stream_token(msg.content)

    await final_answer.send()
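To sanity-check the graph outside the Chainlit UI, the compiled graph can be invoked directly; a minimal sketch, assuming OPENAI_API_KEY is set (the question string is only an example):

from langchain_core.messages import HumanMessage

# Run the agent -> tools -> final pipeline once and print the last message,
# which is the answer rewritten by the "final" node.
state = graph.invoke({"messages": [HumanMessage(content="what is the weather in sf")]})
print(state["messages"][-1].content)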