Spaces:
Sleeping
Sleeping
Copy to github 1st
Browse files- .gitignore +57 -0
- .replit +56 -0
- .streamlit/config.toml +34 -0
- README.md +120 -14
- README_en.md +120 -0
- agent_chat_history.py +46 -0
- attached_assets/Pasted-class-OpenRouterClient-def-init-self-api-key-self-api-key-api-key-self-1735000678229.txt +61 -0
- chat_manager.py +234 -0
- config.py +44 -0
- i18n_utils.py +30 -0
- llm_client.py +108 -0
- locales/en/translation.json +45 -0
- locales/ja/translation.json +45 -0
- main.py +261 -0
- openrouter_client.py +98 -0
- prompt_template.py +92 -0
- pyproject.toml +12 -0
- replit.nix +6 -0
- save_chat_history.py +14 -0
- templates/templates.json +14 -0
- ui_components.py +90 -0
- uv.lock +0 -0
.gitignore
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Python
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
*.so
|
6 |
+
.Python
|
7 |
+
.env
|
8 |
+
.venv
|
9 |
+
env/
|
10 |
+
venv/
|
11 |
+
ENV/
|
12 |
+
env.bak/
|
13 |
+
venv.bak/
|
14 |
+
.pytest_cache/
|
15 |
+
.coverage
|
16 |
+
htmlcov/
|
17 |
+
|
18 |
+
# Streamlit
|
19 |
+
.streamlit/secrets.toml
|
20 |
+
|
21 |
+
# Project specific
|
22 |
+
generated-icon.png
|
23 |
+
chat_export_*.md
|
24 |
+
agent_chat_*.md
|
25 |
+
*.log
|
26 |
+
|
27 |
+
# Export directory
|
28 |
+
export/
|
29 |
+
|
30 |
+
# Dependencies
|
31 |
+
.uv/
|
32 |
+
.venv/
|
33 |
+
.python-version
|
34 |
+
pip-log.txt
|
35 |
+
pip-delete-this-directory.txt
|
36 |
+
|
37 |
+
# Editor/OS
|
38 |
+
.vscode/
|
39 |
+
.idea/
|
40 |
+
*.swp
|
41 |
+
*.swo
|
42 |
+
*~
|
43 |
+
.DS_Store
|
44 |
+
Thumbs.db
|
45 |
+
|
46 |
+
# Distribution / packaging
|
47 |
+
dist/
|
48 |
+
build/
|
49 |
+
*.egg-info/
|
50 |
+
|
51 |
+
# Unit test / coverage reports
|
52 |
+
.tox/
|
53 |
+
.coverage.*
|
54 |
+
.cache
|
55 |
+
nosetests.xml
|
56 |
+
coverage.xml
|
57 |
+
*.cover
|
.replit
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
modules = ["python-3.11"]
|
2 |
+
|
3 |
+
[nix]
|
4 |
+
channel = "stable-24_05"
|
5 |
+
|
6 |
+
[deployment]
|
7 |
+
deploymentTarget = "autoscale"
|
8 |
+
run = ["sh", "-c", "python save_chat_history.py"]
|
9 |
+
|
10 |
+
[workflows]
|
11 |
+
runButton = "Project"
|
12 |
+
|
13 |
+
[[workflows.workflow]]
|
14 |
+
name = "Project"
|
15 |
+
mode = "parallel"
|
16 |
+
author = "agent"
|
17 |
+
|
18 |
+
[[workflows.workflow.tasks]]
|
19 |
+
task = "workflow.run"
|
20 |
+
args = "save_history"
|
21 |
+
|
22 |
+
[[workflows.workflow.tasks]]
|
23 |
+
task = "workflow.run"
|
24 |
+
args = "save_agent_history"
|
25 |
+
|
26 |
+
[[workflows.workflow]]
|
27 |
+
name = "save_history"
|
28 |
+
author = "agent"
|
29 |
+
|
30 |
+
[workflows.workflow.metadata]
|
31 |
+
agentRequireRestartOnSave = false
|
32 |
+
|
33 |
+
[[workflows.workflow.tasks]]
|
34 |
+
task = "packager.installForAll"
|
35 |
+
|
36 |
+
[[workflows.workflow.tasks]]
|
37 |
+
task = "shell.exec"
|
38 |
+
args = "python save_chat_history.py"
|
39 |
+
|
40 |
+
[[workflows.workflow]]
|
41 |
+
name = "save_agent_history"
|
42 |
+
author = "agent"
|
43 |
+
|
44 |
+
[workflows.workflow.metadata]
|
45 |
+
agentRequireRestartOnSave = false
|
46 |
+
|
47 |
+
[[workflows.workflow.tasks]]
|
48 |
+
task = "packager.installForAll"
|
49 |
+
|
50 |
+
[[workflows.workflow.tasks]]
|
51 |
+
task = "shell.exec"
|
52 |
+
args = "python agent_chat_history.py"
|
53 |
+
|
54 |
+
[[ports]]
|
55 |
+
localPort = 5000
|
56 |
+
externalPort = 80
|
.streamlit/config.toml
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[server]
|
2 |
+
headless = true
|
3 |
+
address = "0.0.0.0"
|
4 |
+
port = 5000
|
5 |
+
|
6 |
+
[theme]
|
7 |
+
# Primary accent color for interactive elements
|
8 |
+
primaryColor = "#FF4B4B"
|
9 |
+
|
10 |
+
# Background color for the main content area
|
11 |
+
backgroundColor = "#FFFFFF"
|
12 |
+
|
13 |
+
# Background color used for the sidebar and most interactive widgets
|
14 |
+
secondaryBackgroundColor = "#F0F2F6"
|
15 |
+
|
16 |
+
# Color used for almost all text
|
17 |
+
textColor = "#262730"
|
18 |
+
|
19 |
+
# Font family for all text in the app, except code blocks
|
20 |
+
# Accepted values (serif | sans serif | monospace)
|
21 |
+
# Default: "sans serif"
|
22 |
+
font = "sans serif"
|
23 |
+
|
24 |
+
# The preset Streamlit theme to use (light or dark)
|
25 |
+
base = "light"
|
26 |
+
|
27 |
+
# Background color for sidebar
|
28 |
+
sidebarBackgroundColor = "#F0F2F6"
|
29 |
+
|
30 |
+
# Background color for widgets
|
31 |
+
widgetBackgroundColor = "#FFFFFF"
|
32 |
+
|
33 |
+
[browser]
|
34 |
+
gatherUsageStats = false
|
README.md
CHANGED
@@ -1,14 +1,120 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# MyChatMe - マルチリンガルAIチャットアプリケーション
|
2 |
+
|
3 |
+
[](https://www.streamlit.io)
|
4 |
+
[](https://www.python.org/downloads/release/python-3120/)
|
5 |
+
[](https://opensource.org/licenses/MIT)
|
6 |
+
|
7 |
+
高度なマルチリンガルAIチャットアプリケーションで、複数の言語での対話とプロンプトテンプレート管理を実現します。
|
8 |
+
|
9 |
+
[English](README_en.md) | 日本語
|
10 |
+
|
11 |
+
## 🌟 主な機能
|
12 |
+
|
13 |
+
- 🤖 複数のAIモデルをサポート
|
14 |
+
- OpenAI GPT-4
|
15 |
+
- Google Gemini-2.0
|
16 |
+
- Anthropic Claude-3.5
|
17 |
+
- 🌐 多言語対応
|
18 |
+
- 日本語とEnglishのインターフェース
|
19 |
+
- 言語切り替え機能
|
20 |
+
- 📝 プロンプトテンプレート管理
|
21 |
+
- テンプレートの保存・編集
|
22 |
+
- カテゴリ分類
|
23 |
+
- 💾 チャット履歴
|
24 |
+
- マークダウン形式でエクスポート
|
25 |
+
- PDF出力対応
|
26 |
+
- 🎨 カスタマイズ可能なテーマ
|
27 |
+
- ⚡ レスポンシブデザイン
|
28 |
+
|
29 |
+
## 🛠️ 技術スタック
|
30 |
+
|
31 |
+
- **フロントエンド**: Streamlit
|
32 |
+
- **バックエンド**: Python 3.12
|
33 |
+
- **AI統合**:
|
34 |
+
- OpenAI API
|
35 |
+
- Google Gemini API (via OpenRouter)
|
36 |
+
- Anthropic Claude API (via OpenRouter)
|
37 |
+
- **国際化**: カスタムi18nシステム
|
38 |
+
- **データ出力**: ReportLab (PDF生成)
|
39 |
+
|
40 |
+
## 📋 必要条件
|
41 |
+
|
42 |
+
- Python 3.12以上
|
43 |
+
- OpenAI APIキー
|
44 |
+
- OpenRouter APIキー(Gemini-2.0とClaude-3.5用)
|
45 |
+
|
46 |
+
## 🚀 セットアップ
|
47 |
+
|
48 |
+
1. リポジトリのクローン:
|
49 |
+
```bash
|
50 |
+
git clone https://github.com/yourusername/mychatme.git
|
51 |
+
cd mychatme
|
52 |
+
```
|
53 |
+
|
54 |
+
2. 依存関係のインストール:
|
55 |
+
```bash
|
56 |
+
pip install -r requirements.txt
|
57 |
+
```
|
58 |
+
|
59 |
+
3. 環境変数の設定:
|
60 |
+
```bash
|
61 |
+
# .env ファイルを作成
|
62 |
+
OPENAI_API_KEY=your_openai_api_key
|
63 |
+
OPENROUTER_API_KEY=your_openrouter_api_key
|
64 |
+
```
|
65 |
+
|
66 |
+
4. アプリケーションの起動:
|
67 |
+
```bash
|
68 |
+
streamlit run main.py
|
69 |
+
```
|
70 |
+
|
71 |
+
## 💡 使用方法
|
72 |
+
|
73 |
+
1. **言語の選択**:
|
74 |
+
- サイドバーから日本語/英語を選択
|
75 |
+
|
76 |
+
2. **AIモデルの選択**:
|
77 |
+
- GPT-4, Gemini-2.0, Claude-3.5から選択
|
78 |
+
- 各モデルの特徴に応じて使い分け
|
79 |
+
|
80 |
+
3. **プロンプトテンプレート**:
|
81 |
+
- テンプレートの作成・保存
|
82 |
+
- 保存したテンプレートの呼び出し
|
83 |
+
- テンプレートの編集・削除
|
84 |
+
|
85 |
+
4. **チャット履歴の管理**:
|
86 |
+
- チャット内容のエクスポート(MD/PDF)
|
87 |
+
- タイムゾーン対応の履歴管理
|
88 |
+
|
89 |
+
5. **テーマカスタマイズ**:
|
90 |
+
- ライト/ダークモード
|
91 |
+
- カラーテーマの変更
|
92 |
+
|
93 |
+
## 🔒 セキュリティ
|
94 |
+
|
95 |
+
- API認証情報は環境変数で管理
|
96 |
+
- セッション管理による安全な状態保持
|
97 |
+
- エラーハンドリングとレート制限の実装
|
98 |
+
|
99 |
+
## 🤝 貢献
|
100 |
+
|
101 |
+
1. このリポジトリをフォーク
|
102 |
+
2. 新しいブランチを作成 (`git checkout -b feature/amazing-feature`)
|
103 |
+
3. 変更をコミット (`git commit -m 'Add amazing feature'`)
|
104 |
+
4. ブランチにプッシュ (`git push origin feature/amazing-feature`)
|
105 |
+
5. プルリクエストを作成
|
106 |
+
|
107 |
+
## 📜 ライセンス
|
108 |
+
|
109 |
+
このプロジェクトはMITライセンスの下で公開されています。詳細は [LICENSE](LICENSE) ファイルを参照してください。
|
110 |
+
|
111 |
+
## 🙏 謝辞
|
112 |
+
|
113 |
+
- [Streamlit](https://streamlit.io/) - 素晴らしいWebアプリケーションフレームワーク
|
114 |
+
- [OpenAI](https://openai.com/) - GPT-4モデルの提供
|
115 |
+
- [OpenRouter](https://openrouter.ai/) - 統合AIモデルアクセス
|
116 |
+
- [ReportLab](https://www.reportlab.com/) - PDFエクスポート機能
|
117 |
+
|
118 |
+
## 📞 サポート
|
119 |
+
|
120 |
+
問題や提案がある場合は、GitHubのIssueセクションでお知らせください。
|
README_en.md
ADDED
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# MyChatMe - Multilingual AI Chat Application
|
2 |
+
|
3 |
+
[](https://www.streamlit.io)
|
4 |
+
[](https://www.python.org/downloads/release/python-3120/)
|
5 |
+
[](https://opensource.org/licenses/MIT)
|
6 |
+
|
7 |
+
An advanced multilingual AI chat application designed to streamline cross-language communication and prompt template management.
|
8 |
+
|
9 |
+
[日本語](README.md) | English
|
10 |
+
|
11 |
+
## 🌟 Key Features
|
12 |
+
|
13 |
+
- 🤖 Multiple AI Model Support
|
14 |
+
- OpenAI GPT-4
|
15 |
+
- Google Gemini-2.0
|
16 |
+
- Anthropic Claude-3.5
|
17 |
+
- 🌐 Multilingual Support
|
18 |
+
- English and Japanese interfaces
|
19 |
+
- Language switching capability
|
20 |
+
- 📝 Prompt Template Management
|
21 |
+
- Save and edit templates
|
22 |
+
- Category classification
|
23 |
+
- 💾 Chat History
|
24 |
+
- Export as Markdown
|
25 |
+
- PDF output support
|
26 |
+
- 🎨 Customizable Themes
|
27 |
+
- ⚡ Responsive Design
|
28 |
+
|
29 |
+
## 🛠️ Technology Stack
|
30 |
+
|
31 |
+
- **Frontend**: Streamlit
|
32 |
+
- **Backend**: Python 3.12
|
33 |
+
- **AI Integration**:
|
34 |
+
- OpenAI API
|
35 |
+
- Google Gemini API (via OpenRouter)
|
36 |
+
- Anthropic Claude API (via OpenRouter)
|
37 |
+
- **Internationalization**: Custom i18n system
|
38 |
+
- **Data Export**: ReportLab (PDF generation)
|
39 |
+
|
40 |
+
## 📋 Prerequisites
|
41 |
+
|
42 |
+
- Python 3.12 or higher
|
43 |
+
- OpenAI API key
|
44 |
+
- OpenRouter API key (for Gemini-2.0 and Claude-3.5)
|
45 |
+
|
46 |
+
## 🚀 Setup
|
47 |
+
|
48 |
+
1. Clone the repository:
|
49 |
+
```bash
|
50 |
+
git clone https://github.com/yourusername/mychatme.git
|
51 |
+
cd mychatme
|
52 |
+
```
|
53 |
+
|
54 |
+
2. Install dependencies:
|
55 |
+
```bash
|
56 |
+
pip install -r requirements.txt
|
57 |
+
```
|
58 |
+
|
59 |
+
3. Set environment variables:
|
60 |
+
```bash
|
61 |
+
# Create .env file
|
62 |
+
OPENAI_API_KEY=your_openai_api_key
|
63 |
+
OPENROUTER_API_KEY=your_openrouter_api_key
|
64 |
+
```
|
65 |
+
|
66 |
+
4. Start the application:
|
67 |
+
```bash
|
68 |
+
streamlit run main.py
|
69 |
+
```
|
70 |
+
|
71 |
+
## 💡 Usage
|
72 |
+
|
73 |
+
1. **Language Selection**:
|
74 |
+
- Choose English/Japanese from the sidebar
|
75 |
+
|
76 |
+
2. **AI Model Selection**:
|
77 |
+
- Choose between GPT-4, Gemini-2.0, and Claude-3.5
|
78 |
+
- Select based on each model's strengths
|
79 |
+
|
80 |
+
3. **Prompt Templates**:
|
81 |
+
- Create and save templates
|
82 |
+
- Load saved templates
|
83 |
+
- Edit and delete templates
|
84 |
+
|
85 |
+
4. **Chat History Management**:
|
86 |
+
- Export chat content (MD/PDF)
|
87 |
+
- Timezone-aware history management
|
88 |
+
|
89 |
+
5. **Theme Customization**:
|
90 |
+
- Light/Dark mode
|
91 |
+
- Color theme changes
|
92 |
+
|
93 |
+
## 🔒 Security
|
94 |
+
|
95 |
+
- API credentials managed via environment variables
|
96 |
+
- Secure session state management
|
97 |
+
- Error handling and rate limiting implementation
|
98 |
+
|
99 |
+
## 🤝 Contributing
|
100 |
+
|
101 |
+
1. Fork this repository
|
102 |
+
2. Create a new branch (`git checkout -b feature/amazing-feature`)
|
103 |
+
3. Commit your changes (`git commit -m 'Add amazing feature'`)
|
104 |
+
4. Push to the branch (`git push origin feature/amazing-feature`)
|
105 |
+
5. Create a Pull Request
|
106 |
+
|
107 |
+
## 📜 License
|
108 |
+
|
109 |
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
110 |
+
|
111 |
+
## 🙏 Acknowledgments
|
112 |
+
|
113 |
+
- [Streamlit](https://streamlit.io/) - Amazing web application framework
|
114 |
+
- [OpenAI](https://openai.com/) - GPT-4 model provider
|
115 |
+
- [OpenRouter](https://openrouter.ai/) - Unified AI model access
|
116 |
+
- [ReportLab](https://www.reportlab.com/) - PDF export functionality
|
117 |
+
|
118 |
+
## 📞 Support
|
119 |
+
|
120 |
+
If you have any issues or suggestions, please let us know in the GitHub Issues section.
|
agent_chat_history.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datetime import datetime
|
2 |
+
import os
|
3 |
+
import pytz
|
4 |
+
from config import Config
|
5 |
+
|
6 |
+
def save_agent_chat_history(history):
|
7 |
+
"""Save agent chat history to a markdown file in the export directory."""
|
8 |
+
try:
|
9 |
+
# Create export directory if it doesn't exist
|
10 |
+
os.makedirs("export", exist_ok=True)
|
11 |
+
|
12 |
+
# Get current time in the configured timezone
|
13 |
+
tz = pytz.timezone(Config.DEFAULT_TIMEZONE)
|
14 |
+
current_time = datetime.now(tz)
|
15 |
+
|
16 |
+
filename = f"export/agent_chat_{current_time.strftime('%Y%m%d_%H%M%S')}.md"
|
17 |
+
|
18 |
+
with open(filename, "w", encoding="utf-8") as f:
|
19 |
+
f.write("# エージェントチャット履歴\n\n")
|
20 |
+
f.write(f"生成日時: {current_time.strftime('%Y-%m-%d %H:%M:%S %Z')}\n\n")
|
21 |
+
|
22 |
+
for entry in history:
|
23 |
+
if "User:" in entry:
|
24 |
+
f.write(f"## ユーザー\n{entry.replace('User:', '').strip()}\n\n")
|
25 |
+
elif "Editor:" in entry:
|
26 |
+
f.write(f"## エージェント\n{entry.replace('Editor:', '').strip()}\n\n")
|
27 |
+
|
28 |
+
return filename
|
29 |
+
except Exception as e:
|
30 |
+
raise Exception(f"エージェントチャット履歴の保存に失敗しました: {str(e)}")
|
31 |
+
|
32 |
+
if __name__ == "__main__":
|
33 |
+
# テスト用のチャット履歴
|
34 |
+
history = """
|
35 |
+
User: save_agent_chat_history の使い方を教えてください。
|
36 |
+
Editor: はい、save_agent_chat_historyの使い方を説明します。このツールは会話履歴を保存するために使用します。
|
37 |
+
|
38 |
+
User: 保存先はどこですか?
|
39 |
+
Editor: exportディレクトリに保存されます。ディレクトリが存在しない場合は自動的に作成されます。
|
40 |
+
"""
|
41 |
+
|
42 |
+
try:
|
43 |
+
saved_file = save_agent_chat_history(history.split('\n'))
|
44 |
+
print(f"チャット履歴を保存しました: {saved_file}")
|
45 |
+
except Exception as e:
|
46 |
+
print(f"エラー: {str(e)}")
|
attached_assets/Pasted-class-OpenRouterClient-def-init-self-api-key-self-api-key-api-key-self-1735000678229.txt
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
class OpenRouterClient:
|
2 |
+
def __init__(self, api_key):
|
3 |
+
self.api_key = api_key
|
4 |
+
self.base_url = "https://openrouter.ai/api/v1/chat/completions"
|
5 |
+
|
6 |
+
def create(self, model, messages, response_format=None):
|
7 |
+
try:
|
8 |
+
headers = {
|
9 |
+
"Authorization": f"Bearer {self.api_key}",
|
10 |
+
"HTTP-Referer": "https://localhost:5000",
|
11 |
+
"X-Title": "ChatMe Application",
|
12 |
+
"Content-Type": "application/json"
|
13 |
+
}
|
14 |
+
|
15 |
+
data = {
|
16 |
+
"model": model,
|
17 |
+
"messages": messages
|
18 |
+
}
|
19 |
+
|
20 |
+
if response_format:
|
21 |
+
data["response_format"] = response_format
|
22 |
+
|
23 |
+
response = requests.post(
|
24 |
+
self.base_url,
|
25 |
+
headers=headers,
|
26 |
+
json=data # Use json parameter instead of data for automatic JSON encoding
|
27 |
+
)
|
28 |
+
response.raise_for_status()
|
29 |
+
|
30 |
+
response_data = response.json()
|
31 |
+
if not response_data.get('choices') or not response_data['choices'][0].get('message'):
|
32 |
+
raise ValueError(f"Invalid response format from OpenRouter: {response_data}")
|
33 |
+
|
34 |
+
return response_data['choices'][0]['message']['content']
|
35 |
+
|
36 |
+
except requests.exceptions.RequestException as e:
|
37 |
+
logger.error(f"OpenRouter API request error: {str(e)}")
|
38 |
+
return None
|
39 |
+
except (KeyError, ValueError) as e:
|
40 |
+
logger.error(f"OpenRouter API response error: {str(e)}")
|
41 |
+
return None
|
42 |
+
except Exception as e:
|
43 |
+
logger.error(f"Unexpected error in OpenRouter API call: {str(e)}")
|
44 |
+
return None
|
45 |
+
|
46 |
+
# Initialize OpenRouter client
|
47 |
+
openrouter_client = OpenRouterClient(api_key=os.environ.get("OPENROUTER_API_KEY"))
|
48 |
+
|
49 |
+
# Available LLM models
|
50 |
+
LLM_MODELS = {
|
51 |
+
"OpenAI": {
|
52 |
+
"name": "OpenAI GPT-4",
|
53 |
+
"model": "gpt-4o",
|
54 |
+
"client": "openai"
|
55 |
+
},
|
56 |
+
"OpenRouter": {
|
57 |
+
"name": "Gemini 2.0 Flash",
|
58 |
+
"model": "google/gemini-2.0-flash-exp:free",
|
59 |
+
"client": "openrouter"
|
60 |
+
}
|
61 |
+
}
|
chat_manager.py
ADDED
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Dict, Optional
|
2 |
+
from dataclasses import dataclass
|
3 |
+
from datetime import datetime
|
4 |
+
import json
|
5 |
+
from config import Config
|
6 |
+
from reportlab.lib import colors
|
7 |
+
from reportlab.lib.pagesizes import letter, A4
|
8 |
+
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
|
9 |
+
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
|
10 |
+
from reportlab.pdfbase import pdfmetrics
|
11 |
+
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
|
12 |
+
import os
|
13 |
+
import pytz
|
14 |
+
|
15 |
+
@dataclass
|
16 |
+
class ChatSession:
|
17 |
+
id: str
|
18 |
+
messages: List[Dict[str, str]]
|
19 |
+
system_prompt: str
|
20 |
+
model: str
|
21 |
+
created_at: str
|
22 |
+
context_summary: Optional[str] = None
|
23 |
+
context_messages: List[Dict[str, str]] = None
|
24 |
+
|
25 |
+
def __post_init__(self):
|
26 |
+
if self.context_messages is None:
|
27 |
+
self.context_messages = []
|
28 |
+
|
29 |
+
class ChatManager:
|
30 |
+
def __init__(self):
|
31 |
+
self.current_session: ChatSession = self._create_new_session()
|
32 |
+
self.history: List[ChatSession] = []
|
33 |
+
self._setup_pdf_fonts()
|
34 |
+
|
35 |
+
def _setup_pdf_fonts(self):
|
36 |
+
"""Set up CID fonts for PDF generation with Japanese support"""
|
37 |
+
try:
|
38 |
+
# Register the Japanese font
|
39 |
+
pdfmetrics.registerFont(UnicodeCIDFont('HeiseiMin-W3'))
|
40 |
+
pdfmetrics.registerFont(UnicodeCIDFont('HeiseiKakuGo-W5'))
|
41 |
+
except Exception as e:
|
42 |
+
print(f"Warning: Could not register PDF fonts: {str(e)}")
|
43 |
+
|
44 |
+
def _format_datetime(self, dt_str: str, timezone: str = None) -> str:
|
45 |
+
"""Format datetime string according to the specified timezone"""
|
46 |
+
if not timezone:
|
47 |
+
timezone = Config.DEFAULT_TIMEZONE
|
48 |
+
|
49 |
+
try:
|
50 |
+
dt = datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
|
51 |
+
tz = pytz.timezone(timezone)
|
52 |
+
local_dt = pytz.utc.localize(dt).astimezone(tz)
|
53 |
+
return local_dt.strftime("%Y-%m-%d %H:%M:%S %Z")
|
54 |
+
except Exception as e:
|
55 |
+
print(f"Error formatting datetime: {str(e)}")
|
56 |
+
return dt_str
|
57 |
+
|
58 |
+
def _create_new_session(self, system_prompt: str = "", model: str = "gpt-4o") -> ChatSession:
|
59 |
+
return ChatSession(
|
60 |
+
id=datetime.now().strftime("%Y%m%d_%H%M%S"),
|
61 |
+
messages=[],
|
62 |
+
system_prompt=system_prompt,
|
63 |
+
model=model,
|
64 |
+
created_at=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
|
65 |
+
context_summary=None,
|
66 |
+
context_messages=[]
|
67 |
+
)
|
68 |
+
|
69 |
+
def new_chat(self, system_prompt: str, model: str) -> None:
|
70 |
+
if self.current_session.messages:
|
71 |
+
self.history.append(self.current_session)
|
72 |
+
if len(self.history) > Config.MAX_HISTORY_CHATS:
|
73 |
+
self.history.pop(0)
|
74 |
+
self.current_session = self._create_new_session(system_prompt, model)
|
75 |
+
|
76 |
+
def add_message(self, role: str, content: str) -> None:
|
77 |
+
message = {
|
78 |
+
"role": role,
|
79 |
+
"content": content,
|
80 |
+
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
81 |
+
}
|
82 |
+
self.current_session.messages.append(message)
|
83 |
+
self.current_session.context_messages.append(message)
|
84 |
+
|
85 |
+
# Keep only the most recent context window messages
|
86 |
+
if len(self.current_session.context_messages) > Config.CONTEXT_WINDOW_MESSAGES:
|
87 |
+
self.current_session.context_messages.pop(0)
|
88 |
+
|
89 |
+
def get_messages(self, include_system: bool = True, timezone: str = None) -> List[Dict[str, str]]:
|
90 |
+
messages = []
|
91 |
+
if include_system and self.current_session.system_prompt:
|
92 |
+
messages.append({
|
93 |
+
"role": "system",
|
94 |
+
"content": self.current_session.system_prompt
|
95 |
+
})
|
96 |
+
|
97 |
+
# Add context summary if available
|
98 |
+
if self.current_session.context_summary:
|
99 |
+
messages.append({
|
100 |
+
"role": "system",
|
101 |
+
"content": f"Previous conversation context: {self.current_session.context_summary}"
|
102 |
+
})
|
103 |
+
|
104 |
+
# Add recent context messages with formatted timestamps
|
105 |
+
for msg in self.current_session.context_messages:
|
106 |
+
formatted_msg = msg.copy()
|
107 |
+
if "timestamp" in formatted_msg:
|
108 |
+
formatted_msg["timestamp"] = self._format_datetime(formatted_msg["timestamp"], timezone)
|
109 |
+
messages.append(formatted_msg)
|
110 |
+
|
111 |
+
return messages
|
112 |
+
|
113 |
+
def update_context_summary(self, summary: str) -> None:
|
114 |
+
"""Update the conversation context summary."""
|
115 |
+
self.current_session.context_summary = summary
|
116 |
+
|
117 |
+
def load_chat(self, session_id: str) -> bool:
|
118 |
+
for session in self.history:
|
119 |
+
if session.id == session_id:
|
120 |
+
self.current_session = session
|
121 |
+
return True
|
122 |
+
return False
|
123 |
+
|
124 |
+
def clear_current_chat(self) -> None:
|
125 |
+
self.current_session = self._create_new_session(
|
126 |
+
self.current_session.system_prompt,
|
127 |
+
self.current_session.model
|
128 |
+
)
|
129 |
+
|
130 |
+
def export_chat_markdown(self, timezone: str = None) -> str:
|
131 |
+
"""Export current chat session as Markdown format."""
|
132 |
+
md_content = []
|
133 |
+
|
134 |
+
# Add header with timezone-aware timestamp
|
135 |
+
created_at = self._format_datetime(self.current_session.created_at, timezone)
|
136 |
+
md_content.append(f"# Chat Session - {created_at}\n")
|
137 |
+
md_content.append(f"Model: {self.current_session.model}\n")
|
138 |
+
|
139 |
+
# Add system prompt if exists
|
140 |
+
if self.current_session.system_prompt:
|
141 |
+
md_content.append("## System Prompt\n")
|
142 |
+
md_content.append(f"{self.current_session.system_prompt}\n")
|
143 |
+
|
144 |
+
# Add context summary if exists
|
145 |
+
if self.current_session.context_summary:
|
146 |
+
md_content.append("## Context Summary\n")
|
147 |
+
md_content.append(f"{self.current_session.context_summary}\n")
|
148 |
+
|
149 |
+
# Add messages with timezone-aware timestamps
|
150 |
+
md_content.append("## Messages\n")
|
151 |
+
for msg in self.current_session.messages:
|
152 |
+
role = msg["role"].title()
|
153 |
+
content = msg["content"].replace("\n", "\n ")
|
154 |
+
timestamp = self._format_datetime(msg.get("timestamp", ""), timezone)
|
155 |
+
md_content.append(f"### {role} ({timestamp})\n{content}\n")
|
156 |
+
|
157 |
+
return "\n".join(md_content)
|
158 |
+
|
159 |
+
def save_markdown_file(self, timezone: str = None) -> str:
|
160 |
+
"""Save current chat session as Markdown file and return the filename."""
|
161 |
+
try:
|
162 |
+
# Create export directory if it doesn't exist
|
163 |
+
os.makedirs("export", exist_ok=True)
|
164 |
+
|
165 |
+
filename = f"export/chat_export_{self.current_session.id}.md"
|
166 |
+
content = self.export_chat_markdown(timezone)
|
167 |
+
with open(filename, "w", encoding="utf-8") as f:
|
168 |
+
f.write(content)
|
169 |
+
return filename
|
170 |
+
except Exception as e:
|
171 |
+
raise Exception(f"Failed to save markdown file: {str(e)}")
|
172 |
+
|
173 |
+
def export_chat_pdf(self, timezone: str = None) -> str:
|
174 |
+
"""Export current chat session as PDF format."""
|
175 |
+
try:
|
176 |
+
# Create export directory if it doesn't exist
|
177 |
+
os.makedirs("export", exist_ok=True)
|
178 |
+
|
179 |
+
filename = f"export/chat_export_{self.current_session.id}.pdf"
|
180 |
+
doc = SimpleDocTemplate(filename, pagesize=A4)
|
181 |
+
styles = getSampleStyleSheet()
|
182 |
+
|
183 |
+
# Create custom styles with Japanese font support
|
184 |
+
styles.add(ParagraphStyle(
|
185 |
+
name='JapaneseText',
|
186 |
+
parent=styles['Normal'],
|
187 |
+
fontName='HeiseiMin-W3',
|
188 |
+
fontSize=10,
|
189 |
+
leading=14
|
190 |
+
))
|
191 |
+
styles.add(ParagraphStyle(
|
192 |
+
name='JapaneseHeading',
|
193 |
+
parent=styles['Heading1'],
|
194 |
+
fontName='HeiseiKakuGo-W5',
|
195 |
+
fontSize=16,
|
196 |
+
leading=20
|
197 |
+
))
|
198 |
+
|
199 |
+
story = []
|
200 |
+
|
201 |
+
# Add header with timezone-aware timestamp
|
202 |
+
created_at = self._format_datetime(self.current_session.created_at, timezone)
|
203 |
+
story.append(Paragraph(f"Chat Session - {created_at}", styles['JapaneseHeading']))
|
204 |
+
story.append(Paragraph(f"Model: {self.current_session.model}", styles['JapaneseText']))
|
205 |
+
story.append(Spacer(1, 12))
|
206 |
+
|
207 |
+
# Add system prompt if exists
|
208 |
+
if self.current_session.system_prompt:
|
209 |
+
story.append(Paragraph("System Prompt", styles['JapaneseHeading']))
|
210 |
+
story.append(Paragraph(self.current_session.system_prompt, styles['JapaneseText']))
|
211 |
+
story.append(Spacer(1, 12))
|
212 |
+
|
213 |
+
# Add context summary if exists
|
214 |
+
if self.current_session.context_summary:
|
215 |
+
story.append(Paragraph("Context Summary", styles['JapaneseHeading']))
|
216 |
+
story.append(Paragraph(self.current_session.context_summary, styles['JapaneseText']))
|
217 |
+
story.append(Spacer(1, 12))
|
218 |
+
|
219 |
+
# Add messages with timezone-aware timestamps
|
220 |
+
story.append(Paragraph("Messages", styles['JapaneseHeading']))
|
221 |
+
for msg in self.current_session.messages:
|
222 |
+
role = msg["role"].title()
|
223 |
+
content = msg["content"]
|
224 |
+
timestamp = self._format_datetime(msg.get("timestamp", ""), timezone)
|
225 |
+
|
226 |
+
story.append(Paragraph(f"{role} ({timestamp})", styles['JapaneseHeading']))
|
227 |
+
story.append(Paragraph(content, styles['JapaneseText']))
|
228 |
+
story.append(Spacer(1, 12))
|
229 |
+
|
230 |
+
doc.build(story)
|
231 |
+
return filename
|
232 |
+
|
233 |
+
except Exception as e:
|
234 |
+
raise Exception(f"Failed to save PDF file: {str(e)}")
|
config.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from dataclasses import dataclass
|
3 |
+
import pytz
|
4 |
+
|
5 |
+
|
6 |
+
@dataclass
|
7 |
+
class Config:
|
8 |
+
# the newest OpenAI model is "gpt-4o" which was released May 13, 2024.
|
9 |
+
OPENAI_MODEL = "gpt-4o"
|
10 |
+
GEMINI_MODEL = "google/gemini-2.0-flash-exp:free"
|
11 |
+
# CLAUDE_MODEL = "anthropic/claude-3-sonnet:free"
|
12 |
+
CLAUDE_MODEL = "anthropic/claude-3.5-sonnet"
|
13 |
+
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
|
14 |
+
|
15 |
+
SUPPORTED_LANGUAGES = ["en", "ja"]
|
16 |
+
DEFAULT_LANGUAGE = "ja" # Changed from "en" to "ja"
|
17 |
+
|
18 |
+
# Timezone settings
|
19 |
+
DEFAULT_TIMEZONE = "Asia/Tokyo"
|
20 |
+
SUPPORTED_TIMEZONES = [
|
21 |
+
"Asia/Tokyo",
|
22 |
+
"America/New_York",
|
23 |
+
"America/Los_Angeles",
|
24 |
+
"Europe/London",
|
25 |
+
"Europe/Paris",
|
26 |
+
"Asia/Shanghai",
|
27 |
+
"Asia/Singapore",
|
28 |
+
"Australia/Sydney",
|
29 |
+
"Pacific/Auckland"
|
30 |
+
]
|
31 |
+
|
32 |
+
# Chat context settings
|
33 |
+
MAX_HISTORY_CHATS = 10
|
34 |
+
MAX_CONTEXT_LENGTH = 4096 # Maximum token length for context
|
35 |
+
MEMORY_SUMMARY_TOKENS = 150 # Length of conversation summaries
|
36 |
+
CONTEXT_WINDOW_MESSAGES = 10 # Number of messages to keep in immediate context
|
37 |
+
|
38 |
+
@staticmethod
|
39 |
+
def get_openai_key():
|
40 |
+
return os.getenv("OPENAI_API_KEY", "")
|
41 |
+
|
42 |
+
@staticmethod
|
43 |
+
def get_openrouter_key():
|
44 |
+
return os.getenv("OPENROUTER_API_KEY", "")
|
i18n_utils.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from typing import Dict
|
4 |
+
from config import Config
|
5 |
+
|
6 |
+
class I18nManager:
|
7 |
+
_instance = None
|
8 |
+
_translations: Dict[str, Dict] = {}
|
9 |
+
_current_language = Config.DEFAULT_LANGUAGE
|
10 |
+
|
11 |
+
def __new__(cls):
|
12 |
+
if cls._instance is None:
|
13 |
+
cls._instance = super(I18nManager, cls).__new__(cls)
|
14 |
+
cls._instance._load_translations()
|
15 |
+
return cls._instance
|
16 |
+
|
17 |
+
def _load_translations(self):
|
18 |
+
for lang in Config.SUPPORTED_LANGUAGES:
|
19 |
+
file_path = f"locales/{lang}/translation.json"
|
20 |
+
with open(file_path, "r", encoding="utf-8") as f:
|
21 |
+
self._translations[lang] = json.load(f)
|
22 |
+
|
23 |
+
def set_language(self, language: str):
|
24 |
+
if language in Config.SUPPORTED_LANGUAGES:
|
25 |
+
self._current_language = language
|
26 |
+
|
27 |
+
def get_text(self, key: str) -> str:
|
28 |
+
return self._translations.get(self._current_language, {}).get(
|
29 |
+
key, f"Missing translation: {key}"
|
30 |
+
)
|
llm_client.py
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from typing import List, Dict
|
3 |
+
import openai
|
4 |
+
from config import Config
|
5 |
+
from openrouter_client import OpenRouterClient
|
6 |
+
|
7 |
+
class LLMClient:
    """Unified facade over the OpenAI API (GPT-4) and OpenRouter (Gemini/Claude).

    A client whose API key is absent simply stays ``None``; the corresponding
    ``chat_*`` method then raises ``ValueError`` when called.
    """

    def __init__(self):
        self.openai_client = None
        self.openrouter_client = None
        self.test_mode = False  # For testing error scenarios
        self.initialize_clients()

    def initialize_clients(self):
        """Initialize API clients with proper error handling."""
        try:
            openai_key = Config.get_openai_key()
            openrouter_key = Config.get_openrouter_key()

            if openai_key:
                self.openai_client = openai.OpenAI(api_key=openai_key)
            if openrouter_key:
                self.openrouter_client = OpenRouterClient()
        except Exception as e:
            # Initialization is best-effort: the UI surfaces missing keys later.
            print(f"Error initializing API clients: {str(e)}")

    def set_test_mode(self, enabled: bool = True):
        """Enable or disable test mode for simulating errors."""
        self.test_mode = enabled

    def chat_openai(self, messages: List[Dict[str, str]]) -> str:
        """Send chat completion request to OpenAI API.

        Raises:
            ValueError: if the OpenAI client was never initialized.
            Exception: wrapping any OpenAI API error.
        """
        if not self.openai_client:
            raise ValueError("OpenAI client not initialized. Please check your API key.")

        # Developer-mode hook: "test_error <kind>" in the last message simulates
        # a failure so the UI's error handling can be exercised end-to-end.
        if self.test_mode:
            if messages and "test_error" in messages[-1].get("content", "").lower():
                error_type = messages[-1]["content"].lower()
                if "api_key" in error_type:
                    raise ValueError("Invalid API key")
                elif "rate_limit" in error_type:
                    raise Exception("Rate limit exceeded")
                elif "network" in error_type:
                    raise Exception("Network connection error")

        try:
            response = self.openai_client.chat.completions.create(
                model=Config.OPENAI_MODEL,
                messages=messages
            )
            return response.choices[0].message.content
        except openai.APIError as e:
            raise Exception(f"OpenAI API error: {str(e)}")

    def _chat_openrouter(self, messages: List[Dict[str, str]], model: str, label: str) -> str:
        """Shared OpenRouter call backing chat_gemini / chat_claude.

        Raises:
            ValueError: if the OpenRouter client was never initialized.
            Exception: wrapping any OpenRouter error, prefixed with *label*.
        """
        if not self.openrouter_client:
            raise ValueError("OpenRouter client not initialized. Please check your API key.")

        try:
            return self.openrouter_client.create(
                messages=messages,
                model=model
            )
        except Exception as e:
            raise Exception(f"{label} API error: {str(e)}")

    def chat_gemini(self, messages: List[Dict[str, str]]) -> str:
        """Send chat completion request to Gemini via OpenRouter."""
        return self._chat_openrouter(messages, Config.GEMINI_MODEL, "Gemini")

    def chat_claude(self, messages: List[Dict[str, str]]) -> str:
        """Send chat completion request to Claude via OpenRouter."""
        return self._chat_openrouter(messages, Config.CLAUDE_MODEL, "Claude")

    def generate_context_summary(self, messages: List[Dict[str, str]]) -> str:
        """Generate a summary of the conversation context.

        Returns "" on any failure (never raises) so callers can treat
        summarization as best-effort.
        """
        if not self.openai_client:
            print("OpenAI client not initialized. Skipping context summary.")
            return ""

        try:
            summary_prompt = {
                "role": "system",
                "content": (
                    "Summarize the key points of this conversation in a concise way. "
                    "Focus on the main topics and important details. "
                    f"Keep the summary within {Config.MEMORY_SUMMARY_TOKENS} tokens."
                )
            }

            # Only the most recent messages are summarized to bound token use.
            summary_messages = [summary_prompt] + messages[-Config.CONTEXT_WINDOW_MESSAGES:]

            response = self.openai_client.chat.completions.create(
                model=Config.OPENAI_MODEL,
                messages=summary_messages,
                max_tokens=Config.MEMORY_SUMMARY_TOKENS
            )
            return response.choices[0].message.content
        except Exception as e:
            print(f"Failed to generate context summary: {str(e)}")
            return ""  # Return empty string if summarization fails
|
locales/en/translation.json
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"app_title": "MyChatMe",
|
3 |
+
"language": "Language",
|
4 |
+
"system_prompt": "System Prompt",
|
5 |
+
"chat_placeholder": "Type your message here...",
|
6 |
+
"send": "Send",
|
7 |
+
"clear_chat": "Clear Chat",
|
8 |
+
"model_selection": "Select Model",
|
9 |
+
"chat_history": "Chat History",
|
10 |
+
"new_chat": "New Chat",
|
11 |
+
"settings": "Settings",
|
12 |
+
"timezone": "Timezone",
|
13 |
+
"api_key_openai": "OpenAI API Key",
|
14 |
+
"api_key_openrouter": "OpenRouter API Key",
|
15 |
+
"save_settings": "Save Settings",
|
16 |
+
"error_missing_key": "Please enter API key",
|
17 |
+
"error_api_call": "Error calling API",
|
18 |
+
"error_model_switch": "Failed to switch to the selected model",
|
19 |
+
"error_model_switch_openai": "Failed to switch to GPT-4. Please check your OpenAI API key.",
|
20 |
+
"error_model_switch_openrouter": "Failed to switch to model. Please check your OpenRouter API key.",
|
21 |
+
"error_rate_limit": "Rate limit exceeded. Please try again later.",
|
22 |
+
"error_network": "Network connection error. Please check your connection.",
|
23 |
+
"default_system_prompt": "You are a helpful assistant. Please provide clear and concise responses.",
|
24 |
+
"export_chat": "Export Chat",
|
25 |
+
"export_format": "Export Format",
|
26 |
+
"export_success": "Chat exported successfully!",
|
27 |
+
"export_error": "Error exporting chat",
|
28 |
+
"export_pdf_error": "Error exporting PDF",
|
29 |
+
"developer_mode": "Developer Mode",
|
30 |
+
"enable_error_test": "Enable Error Test Mode",
|
31 |
+
"error_test_enabled": "Error test mode enabled.\nTo test errors, enter these commands:\n- test_error api_key: API key error\n- test_error rate_limit: Rate limit error\n- test_error network: Network error",
|
32 |
+
"error_test_disabled": "Error test mode disabled.",
|
33 |
+
"theme_mode": "Theme Mode",
|
34 |
+
"prompt_templates": "Prompt Templates",
|
35 |
+
"template_name": "Template Name",
|
36 |
+
"template_content": "Template Content",
|
37 |
+
"template_description": "Description (Optional)",
|
38 |
+
"save_template": "Save Template",
|
39 |
+
"delete_template": "Delete Template",
|
40 |
+
"template_saved": "Template saved successfully!",
|
41 |
+
"template_deleted": "Template deleted successfully!",
|
42 |
+
"template_error": "Error processing template",
|
43 |
+
"select_template": "Select Template",
|
44 |
+
"no_templates": "No templates available"
|
45 |
+
}
|
locales/ja/translation.json
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"app_title": "MyChatMe",
|
3 |
+
"language": "言語",
|
4 |
+
"system_prompt": "システムプロンプト",
|
5 |
+
"chat_placeholder": "メッセージを入力してください...",
|
6 |
+
"send": "送信",
|
7 |
+
"clear_chat": "チャットをクリア",
|
8 |
+
"model_selection": "モデルを選択",
|
9 |
+
"chat_history": "チャット履歴",
|
10 |
+
"new_chat": "新規チャット",
|
11 |
+
"settings": "設定",
|
12 |
+
"timezone": "タイムゾーン",
|
13 |
+
"api_key_openai": "OpenAI APIキー",
|
14 |
+
"api_key_openrouter": "OpenRouter APIキー",
|
15 |
+
"save_settings": "設定を保存",
|
16 |
+
"error_missing_key": "APIキーを入力してください",
|
17 |
+
"error_api_call": "APIの呼び出しでエラーが発生しました",
|
18 |
+
"error_model_switch": "モデルの切り替えに失敗しました",
|
19 |
+
"error_model_switch_openai": "GPT-4への切り替えに失敗しました。OpenAI APIキーを確認してください。",
|
20 |
+
"error_model_switch_openrouter": "モデルの切り替えに失敗しました。OpenRouter APIキーを確認してください。",
|
21 |
+
"error_rate_limit": "レート制限を超えました。しばらく待ってから再試行してください。",
|
22 |
+
"error_network": "ネットワーク接続エラーが発生しました。接続を確認してください。",
|
23 |
+
"default_system_prompt": "私は役立つアシスタントです。明確で簡潔な応答を提供します。",
|
24 |
+
"export_chat": "チャットをエクスポート",
|
25 |
+
"export_format": "エクスポート形式",
|
26 |
+
"export_success": "チャットのエクスポートが完了しました!",
|
27 |
+
"export_error": "エクスポート中にエラーが発生しました",
|
28 |
+
"export_pdf_error": "PDFエクスポート中にエラーが発生しました",
|
29 |
+
"developer_mode": "開発者モード",
|
30 |
+
"enable_error_test": "エラーテストモードを有効にする",
|
31 |
+
"error_test_enabled": "エラーテストモードが有効になりました。\nエラーをテストするには以下のコマンドを入力してください:\n- test_error api_key:APIキーエラー\n- test_error rate_limit:レート制限エラー\n- test_error network:ネットワークエラー",
|
32 |
+
"error_test_disabled": "エラーテストモードが無効になりました。",
|
33 |
+
"theme_mode": "テーマ設定",
|
34 |
+
"prompt_templates": "プロンプトテンプレート",
|
35 |
+
"template_name": "テンプレート名",
|
36 |
+
"template_content": "テンプレート内容",
|
37 |
+
"template_description": "説明(任意)",
|
38 |
+
"save_template": "テンプレートを保存",
|
39 |
+
"delete_template": "テンプレートを削除",
|
40 |
+
"template_saved": "テンプレートを保存しました!",
|
41 |
+
"template_deleted": "テンプレートを削除しました!",
|
42 |
+
"template_error": "テンプレート処理中にエラーが発生しました",
|
43 |
+
"select_template": "テンプレートを選択",
|
44 |
+
"no_templates": "テンプレートがありません"
|
45 |
+
}
|
main.py
ADDED
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import os
|
3 |
+
from chat_manager import ChatManager
|
4 |
+
from llm_client import LLMClient
|
5 |
+
from i18n_utils import I18nManager
|
6 |
+
from ui_components import render_message, render_sidebar, show_notification
|
7 |
+
from config import Config
|
8 |
+
from prompt_template import PromptTemplateManager
|
9 |
+
|
10 |
+
# Page configuration
st.set_page_config(
    page_title="MyChatMe",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        "Get Help": None,
        "Report a bug": None,
        "About": "MyChatMe - Multilingual AI Chat Application"
    }
)

# Initialize session state.
# Streamlit re-runs this module on every user interaction, so each key is
# guarded to ensure the managers/clients are constructed once per session.
if "chat_manager" not in st.session_state:
    st.session_state.chat_manager = ChatManager()
if "llm_client" not in st.session_state:
    st.session_state.llm_client = LLMClient()
if "i18n" not in st.session_state:
    st.session_state.i18n = I18nManager()
if "sidebar_state" not in st.session_state:
    st.session_state.sidebar_state = "expanded"
if "test_mode" not in st.session_state:
    st.session_state.test_mode = False  # developer error-test toggle
if "template_manager" not in st.session_state:
    st.session_state.template_manager = PromptTemplateManager()
|
36 |
+
|
37 |
+
def render_template_manager(i18n):
    """Render the prompt-template management section (select/edit/delete/create).

    Args:
        i18n: the I18nManager supplying localized UI strings.
    """
    with st.expander(i18n.get_text("prompt_templates")):
        # List existing templates and let the user pick one to edit.
        templates = st.session_state.template_manager.list_templates()
        if templates:
            template_names = [f"{t['name']} ({t['created_at']})" for t in templates]
            selected_template = st.selectbox(
                i18n.get_text("select_template"),
                [""] + template_names,
                key="template_selector"
            )

            if selected_template:
                # BUG FIX: template_names.index() is already a 0-based index
                # into `templates`; the previous "- 1" (apparently meant to
                # compensate for the "" placeholder option) made the first
                # entry resolve to the LAST template.
                template = templates[template_names.index(selected_template)]

                # Pre-fill the edit form with the selected template's fields.
                new_template_name = st.text_input(
                    i18n.get_text("template_name"),
                    value=template["name"],
                    key=f"edit_name_{template['id']}"
                )
                new_template_content = st.text_area(
                    i18n.get_text("template_content"),
                    value=template["content"],
                    key=f"edit_content_{template['id']}"
                )
                new_template_description = st.text_input(
                    i18n.get_text("template_description"),
                    value=template.get("description", ""),
                    key=f"edit_description_{template['id']}"
                )

                col1, col2 = st.columns(2)
                with col1:
                    if st.button(i18n.get_text("delete_template")):
                        if st.session_state.template_manager.delete_template(template["id"]):
                            show_notification(i18n.get_text("template_deleted"), "success")
                            # Clear the selector so the deleted entry disappears.
                            if "template_selector" in st.session_state:
                                del st.session_state.template_selector
                            st.rerun()
                with col2:
                    if st.button(i18n.get_text("save_template"), key=f"save_button_{template['id']}"):
                        try:
                            if st.session_state.template_manager.update_template(
                                template["id"],
                                name=new_template_name,
                                content=new_template_content,
                                description=new_template_description
                            ):
                                show_notification(i18n.get_text("template_saved"), "success")
                                # Reset the selector after a successful update.
                                if "template_selector" in st.session_state:
                                    del st.session_state.template_selector
                                st.rerun()
                            else:
                                show_notification(i18n.get_text("template_error"), "error")
                        except Exception as e:
                            show_notification(f"{i18n.get_text('template_error')}: {str(e)}", "error")

        # Creation form: shown when no templates exist or none is selected.
        # (Short-circuit keeps `selected_template` from being read when
        # `templates` is empty.)
        if not templates or not selected_template:
            new_template_name = st.text_input(
                i18n.get_text("template_name"),
                key="new_template_name"
            )
            new_template_content = st.text_area(
                i18n.get_text("template_content"),
                key="new_template_content"
            )
            new_template_description = st.text_input(
                i18n.get_text("template_description"),
                key="new_template_description"
            )

            if st.button(i18n.get_text("save_template"), key="save_new_template"):
                try:
                    if st.session_state.template_manager.add_template(
                        new_template_name,
                        new_template_content,
                        new_template_description
                    ):
                        show_notification(i18n.get_text("template_saved"), "success")
                        # Drop the form's session keys so it renders empty next run.
                        for key in list(st.session_state.keys()):
                            if key.startswith("new_template_"):
                                del st.session_state[key]
                        st.rerun()
                    else:
                        show_notification(i18n.get_text("template_error"), "error")
                except Exception as e:
                    show_notification(f"{i18n.get_text('template_error')}: {str(e)}", "error")
|
130 |
+
|
131 |
+
def main():
    """Top-level Streamlit page: sidebar, template manager, chat loop."""
    i18n = st.session_state.i18n
    chat_manager = st.session_state.chat_manager
    llm_client = st.session_state.llm_client

    st.title(i18n.get_text("app_title"))

    # Check API keys and warn (non-fatal) if either is missing.
    if not Config.get_openai_key():
        show_notification(i18n.get_text("error_missing_key") + " (OpenAI)", "warning")
    if not Config.get_openrouter_key():
        show_notification(i18n.get_text("error_missing_key") + " (OpenRouter)", "warning")

    # Sidebar (returns the chosen UI language and model name)
    language, model = render_sidebar(i18n, chat_manager)

    # Add test mode toggle in sidebar for development
    with st.sidebar:
        st.markdown("---")
        st.header(i18n.get_text("developer_mode"))
        test_mode = st.checkbox(i18n.get_text("enable_error_test"), value=st.session_state.test_mode)
        if test_mode != st.session_state.test_mode:
            st.session_state.test_mode = test_mode
            llm_client.set_test_mode(test_mode)
            if test_mode:
                show_notification(i18n.get_text("error_test_enabled"), "info")
            else:
                show_notification(i18n.get_text("error_test_disabled"), "info")

    # Update language; st.rerun() re-renders the page with new translations.
    if language == "English" and i18n._current_language != "en":
        i18n.set_language("en")
        st.rerun()
    elif language == "日本語" and i18n._current_language != "ja":
        i18n.set_language("ja")
        st.rerun()

    # Render the prompt-template management section.
    render_template_manager(i18n)

    # System prompt
    # NOTE(review): `system_prompt` is not read anywhere below — presumably
    # ChatManager picks it up via st.session_state["system_prompt"]; confirm.
    system_prompt = st.text_area(
        i18n.get_text("system_prompt"),
        value=i18n.get_text("default_system_prompt"),
        key="system_prompt"
    )

    # Chat history selection
    if chat_manager.history:
        selected_chat = st.selectbox(
            i18n.get_text("chat_history"),
            ["New Chat"] + [f"Chat {session.created_at}" for session in chat_manager.history]
        )

        if selected_chat != "New Chat":
            # Map the display label back to the session and load it.
            chat_id = chat_manager.history[
                [f"Chat {session.created_at}" for session in chat_manager.history].index(selected_chat)
            ].id
            chat_manager.load_chat(chat_id)

    # Display chat messages (system messages are hidden from the transcript).
    for message in chat_manager.get_messages(include_system=False):
        render_message(message["role"], message["content"])

    # Chat input
    if prompt := st.chat_input(i18n.get_text("chat_placeholder")):
        # Add user message
        chat_manager.add_message("user", prompt)
        render_message("user", prompt)

        try:
            # Get AI response based on selected model
            messages = chat_manager.get_messages()
            response = None

            # Model selection logic: display name -> bound client method.
            model_map = {
                "GPT-4": llm_client.chat_openai,
                "Gemini-2.0": llm_client.chat_gemini,
                "Claude-3.5": llm_client.chat_claude
            }

            if model in model_map:
                try:
                    response = model_map[model](messages)
                except Exception as e:
                    # Classify the failure by message text to pick a localized
                    # notification, then abort this turn (early return).
                    error_msg = str(e)
                    if "Rate limit exceeded" in error_msg:
                        if "provider" in error_msg:
                            show_notification(f"{i18n.get_text('error_rate_limit')} ({error_msg})", "warning")
                        else:
                            show_notification(i18n.get_text("error_rate_limit"), "warning")
                    elif "API key" in error_msg:
                        if model == "GPT-4":
                            show_notification(i18n.get_text("error_model_switch_openai"), "error")
                        else:
                            show_notification(i18n.get_text("error_model_switch_openrouter"), "error")
                    elif "network" in error_msg.lower():
                        show_notification(i18n.get_text("error_network"), "error")
                    else:
                        show_notification(f"{i18n.get_text('error_model_switch')}: {error_msg}", "error")
                    return
            else:
                show_notification(f"Invalid model selection: {model}", "error")
                return

            if response:
                # Add AI response
                chat_manager.add_message("assistant", response)
                render_message("assistant", response)

                # Generate and update context summary periodically
                if len(chat_manager.current_session.messages) % 5 == 0:  # Every 5 messages
                    try:
                        summary = llm_client.generate_context_summary(chat_manager.current_session.messages)
                        chat_manager.update_context_summary(summary)
                    except Exception as e:
                        # Summaries are best-effort; never fail the chat turn.
                        print(f"Failed to generate context summary: {str(e)}")

            # Keep sidebar expanded after chat
            st.session_state.sidebar_state = "expanded"
        except Exception as e:
            show_notification(f"{i18n.get_text('error_api_call')}: {str(e)}", "error")

    # Clear chat button
    if st.button(i18n.get_text("clear_chat")):
        chat_manager.clear_current_chat()
        st.rerun()
|
259 |
+
|
260 |
+
if __name__ == "__main__":
    # Streamlit executes this module as a script on every rerun.
    main()
|
openrouter_client.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import time
|
3 |
+
import requests
|
4 |
+
from typing import List, Dict, Optional
|
5 |
+
from config import Config
|
6 |
+
|
7 |
+
class OpenRouterClient:
    """Thin REST client for the OpenRouter chat-completions endpoint, with
    exponential-backoff retries for rate limits and transient request errors."""

    def __init__(self):
        self.api_key = Config.get_openrouter_key()
        if not self.api_key:
            raise ValueError("OpenRouter API key is not set")
        self.base_url = Config.OPENROUTER_API_BASE
        self.max_retries = 3
        self.retry_delay = 2  # Initial delay in seconds (doubled on each retry)

    def create(self, messages: List[Dict[str, str]], model: str = None, response_format: Optional[Dict] = None) -> str:
        """POST a chat-completion request and return the assistant's text.

        Retries up to ``max_retries`` times with exponential backoff on generic
        HTTP 429s and transport failures; provider-specific rate limits and
        malformed responses abort immediately.

        Args:
            messages: OpenAI-style role/content dicts.
            model: OpenRouter model id; defaults to Config.GEMINI_MODEL.
            response_format: optional response_format payload passed through.

        Raises:
            ValueError: if the API key is missing.
            Exception: when retries are exhausted or the API reports an error.
        """
        if not self.api_key:
            raise ValueError("OpenRouter API key is not set")

        retries = 0
        last_error = None

        while retries < self.max_retries:
            try:
                headers = {
                    "Authorization": f"Bearer {self.api_key}",
                    "HTTP-Referer": "https://replit.com",
                    "X-Title": "MyChatMe",
                    "Content-Type": "application/json"
                }

                data = {
                    "model": model or Config.GEMINI_MODEL,
                    "messages": messages,
                    "temperature": 0.7,
                    "max_tokens": 1000
                }

                if response_format:
                    data["response_format"] = response_format

                response = requests.post(
                    f"{self.base_url}/chat/completions",
                    headers=headers,
                    json=data,
                    timeout=30  # Added timeout
                )

                if response.status_code == 429:
                    error_data = response.json()
                    error_message = error_data.get('error', {}).get('message', 'Rate limit exceeded')
                    print(f"Rate limit error: {error_message}")

                    # Check if it's a provider-specific rate limit
                    if 'metadata' in error_data.get('error', {}):
                        provider = error_data['error']['metadata'].get('provider_name', 'Unknown')
                        raise Exception(f"Rate limit exceeded for provider: {provider}")

                    # Generic rate limit: back off exponentially, then retry.
                    wait_time = self.retry_delay * (2 ** retries)
                    print(f"Rate limit exceeded. Waiting {wait_time} seconds before retry...")
                    time.sleep(wait_time)
                    retries += 1
                    continue

                response.raise_for_status()
                response_data = response.json()

                # Defensive validation of the response envelope.
                if "error" in response_data:
                    raise ValueError(f"OpenRouter API returned error: {response_data['error']}")

                if not response_data.get('choices'):
                    raise ValueError("No choices in OpenRouter API response")

                if not response_data['choices'][0].get('message'):
                    raise ValueError("No message in OpenRouter API response choice")

                return response_data['choices'][0]['message']['content']

            except requests.exceptions.RequestException as e:
                # Transient transport error: retry with backoff.
                print(f"OpenRouter Request Error: {str(e)}")
                last_error = e
                retries += 1
                if retries < self.max_retries:
                    time.sleep(self.retry_delay * (2 ** retries))
            except ValueError as e:
                # Malformed/erroneous API payload: retrying cannot help.
                print(f"OpenRouter Value Error: {str(e)}")
                last_error = e
                break
            except Exception as e:
                # Includes the provider-specific rate-limit raise above.
                print(f"OpenRouter Unexpected Error: {str(e)}")
                last_error = e
                break

        # If all retries failed or other error occurred
        # NOTE(review): when the generic-429 loop exhausts all retries,
        # `last_error` is still None, so the rate-limit branch below is never
        # taken and the caller sees "Maximum retries exceeded" — confirm intent.
        error_msg = str(last_error) if last_error else "Maximum retries exceeded"
        if "Rate limit exceeded" in error_msg:
            raise Exception(f"OpenRouter rate limit exceeded. Please try again later or switch to a different model.")
        raise Exception(f"OpenRouter API error: {error_msg}")
|
prompt_template.py
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from typing import Dict, List
|
4 |
+
from datetime import datetime
|
5 |
+
|
6 |
+
class PromptTemplateManager:
|
7 |
+
def __init__(self):
|
8 |
+
self.templates_dir = "templates"
|
9 |
+
self.ensure_template_directory()
|
10 |
+
self._templates: Dict[str, Dict] = {}
|
11 |
+
self.load_templates()
|
12 |
+
|
13 |
+
def ensure_template_directory(self):
|
14 |
+
"""テンプレートディレクトリが存在しない場合は作成"""
|
15 |
+
if not os.path.exists(self.templates_dir):
|
16 |
+
os.makedirs(self.templates_dir)
|
17 |
+
|
18 |
+
def load_templates(self):
|
19 |
+
"""保存されているテンプレートを読み込む"""
|
20 |
+
template_file = os.path.join(self.templates_dir, "templates.json")
|
21 |
+
if os.path.exists(template_file):
|
22 |
+
try:
|
23 |
+
with open(template_file, "r", encoding="utf-8") as f:
|
24 |
+
self._templates = json.load(f)
|
25 |
+
except Exception as e:
|
26 |
+
print(f"テンプレート読み込みエラー: {str(e)}")
|
27 |
+
self._templates = {}
|
28 |
+
|
29 |
+
def save_templates(self):
|
30 |
+
"""テンプレートをファイルに保存"""
|
31 |
+
template_file = os.path.join(self.templates_dir, "templates.json")
|
32 |
+
try:
|
33 |
+
with open(template_file, "w", encoding="utf-8") as f:
|
34 |
+
json.dump(self._templates, f, ensure_ascii=False, indent=2)
|
35 |
+
except Exception as e:
|
36 |
+
raise Exception(f"テンプレート保存エラー: {str(e)}")
|
37 |
+
|
38 |
+
def add_template(self, name: str, content: str, description: str = "") -> bool:
|
39 |
+
"""新しいテンプレートを追加"""
|
40 |
+
if not name or not content:
|
41 |
+
return False
|
42 |
+
|
43 |
+
template_id = datetime.now().strftime("%Y%m%d_%H%M%S")
|
44 |
+
self._templates[template_id] = {
|
45 |
+
"name": name,
|
46 |
+
"content": content,
|
47 |
+
"description": description,
|
48 |
+
"created_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
49 |
+
}
|
50 |
+
self.save_templates()
|
51 |
+
return True
|
52 |
+
|
53 |
+
def get_template(self, template_id: str) -> Dict:
|
54 |
+
"""指定されたIDのテンプレートを取得"""
|
55 |
+
return self._templates.get(template_id, {})
|
56 |
+
|
57 |
+
def list_templates(self) -> List[Dict]:
|
58 |
+
"""全てのテンプレートをリスト形式で取得"""
|
59 |
+
return [
|
60 |
+
{"id": k, **v}
|
61 |
+
for k, v in self._templates.items()
|
62 |
+
]
|
63 |
+
|
64 |
+
def delete_template(self, template_id: str) -> bool:
|
65 |
+
"""テンプレートを削除"""
|
66 |
+
if template_id in self._templates:
|
67 |
+
del self._templates[template_id]
|
68 |
+
self.save_templates()
|
69 |
+
return True
|
70 |
+
return False
|
71 |
+
|
72 |
+
def update_template(
|
73 |
+
self,
|
74 |
+
template_id: str,
|
75 |
+
name: str | None = None,
|
76 |
+
content: str | None = None,
|
77 |
+
description: str | None = None
|
78 |
+
) -> bool:
|
79 |
+
"""テンプレートを更新"""
|
80 |
+
if template_id not in self._templates:
|
81 |
+
return False
|
82 |
+
|
83 |
+
template = self._templates[template_id]
|
84 |
+
if name is not None:
|
85 |
+
template["name"] = name
|
86 |
+
if content is not None:
|
87 |
+
template["content"] = content
|
88 |
+
if description is not None:
|
89 |
+
template["description"] = description
|
90 |
+
|
91 |
+
self.save_templates()
|
92 |
+
return True
|
pyproject.toml
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[project]
|
2 |
+
name = "repl-nix-ailanguagecompanion"
|
3 |
+
version = "0.1.0"
|
4 |
+
description = "Add your description here"
|
5 |
+
requires-python = ">=3.11"
|
6 |
+
dependencies = [
|
7 |
+
"openai>=1.58.1",
|
8 |
+
"pytz>=2024.2",
|
9 |
+
"reportlab>=4.2.5",
|
10 |
+
"requests>=2.32.3",
|
11 |
+
"streamlit>=1.41.1",
|
12 |
+
]
|
replit.nix
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{pkgs}: {
|
2 |
+
deps = [
|
3 |
+
pkgs.glibcLocales
|
4 |
+
pkgs.freetype
|
5 |
+
];
|
6 |
+
}
|
save_chat_history.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from chat_manager import ChatManager
|
2 |
+
from datetime import datetime
|
3 |
+
|
4 |
+
def save_chat_history():
    """Export the current chat as a timestamped Markdown file.

    Returns:
        The name of the file that was written.

    Raises:
        Exception: when export or the file write fails.
    """
    manager = ChatManager()
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_path = f"chat_history_{stamp}.md"

    try:
        markdown = manager.export_chat_markdown()
        with open(output_path, "w", encoding="utf-8") as out:
            out.write(markdown)
    except Exception as e:
        raise Exception(f"Failed to save chat history: {str(e)}")
    return output_path
|
templates/templates.json
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"20241224_054200": {
|
3 |
+
"name": "テンプレ1",
|
4 |
+
"content": "こんにちは。あなたはインターネットに精通している技術者です。",
|
5 |
+
"description": "こんにちは。あなたはインターネットに精通している技術者です。",
|
6 |
+
"created_at": "2024-12-24 05:42:00"
|
7 |
+
},
|
8 |
+
"20241224_054237": {
|
9 |
+
"name": "テンプレ2",
|
10 |
+
"content": "あなたはインターネットに精通している技術者です。",
|
11 |
+
"description": "あなたはインターネットに精通している技術者です。",
|
12 |
+
"created_at": "2024-12-24 05:42:37"
|
13 |
+
}
|
14 |
+
}
|
ui_components.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from datetime import datetime
|
3 |
+
import pytz
|
4 |
+
from config import Config
|
5 |
+
|
6 |
+
def render_message(role: str, content: str, timezone: str = Config.DEFAULT_TIMEZONE):
    """Render a single chat message with a role-specific avatar.

    NOTE(review): the ``timezone`` parameter is currently unused — no
    timestamp is rendered despite the original docstring's claim that the
    timezone is taken into account; confirm whether it should be dropped.
    """
    if role == "assistant":
        with st.chat_message(role, avatar="🤖"):
            st.write(content)
    elif role == "user":
        with st.chat_message(role, avatar="👤"):
            st.write(content)
    else:
        # Any other role (e.g. "system") falls back to the default avatar.
        with st.chat_message(role):
            st.write(content)
|
17 |
+
|
18 |
+
def show_notification(message: str, type: str = "info", duration: int = 3):
    """Show an elegant notification toast.

    Dispatches to the Streamlit status widget matching *type*
    ("error" / "success" / "warning"), defaulting to an info box.
    """
    handlers = {
        "error": (st.error, "🚨"),
        "success": (st.success, "✅"),
        "warning": (st.warning, "⚠️"),
    }
    display, icon = handlers.get(type, (st.info, "ℹ️"))
    display(message, icon=icon)
|
28 |
+
|
def render_sidebar(i18n, chat_manager):
    """Render the settings sidebar and return the user's selections.

    Args:
        i18n: Object exposing ``get_text(key)`` for localized labels.
        chat_manager: Object exposing ``save_markdown_file(timezone=...)``
            and ``export_chat_pdf(timezone=...)`` for the export button.

    Returns:
        tuple[str, str]: The selected (language, model) labels.
    """
    sidebar = st.sidebar

    # Streamlit reruns the whole script on each interaction, so
    # session_state is the only store that survives a rerun.
    if "sidebar_visibility" not in st.session_state:
        st.session_state.sidebar_visibility = True

    # Initialize timezone in session state if not already set.
    if "timezone" not in st.session_state:
        st.session_state.timezone = Config.DEFAULT_TIMEZONE

    with sidebar:
        st.title(i18n.get_text("settings"))

        # Language selection — index=1 makes Japanese the default.
        language = st.selectbox(
            i18n.get_text("language"),
            ["English", "日本語"],
            index=1,
            key="language_selector"
        )

        # Model selection — index=1 makes Gemini-2.0 the default.
        model = st.selectbox(
            i18n.get_text("model_selection"),
            ["GPT-4", "Gemini-2.0", "Claude-3.5"],
            index=1,
            key="model_selection"
        )

        # Timezone selection, persisted so export uses the same zone.
        timezone = st.selectbox(
            i18n.get_text("timezone"),
            Config.SUPPORTED_TIMEZONES,
            index=Config.SUPPORTED_TIMEZONES.index(Config.DEFAULT_TIMEZONE),
            key="timezone_selector"
        )
        st.session_state.timezone = timezone

        st.markdown("---")

        # Export section
        export_format = st.selectbox(
            i18n.get_text("export_format"),
            ["Markdown", "PDF"],
            key="export_format"
        )

        if st.button(i18n.get_text("export_chat"), key="export_button"):
            try:
                timezone = st.session_state.get("timezone", Config.DEFAULT_TIMEZONE)
                if export_format == "Markdown":
                    filename = chat_manager.save_markdown_file(timezone=timezone)
                else:
                    filename = chat_manager.export_chat_pdf(timezone=timezone)
                # FIX: the exported filename was computed but never shown;
                # include it in the (previously duplicated) success message.
                show_notification(f"{i18n.get_text('export_success')} ({filename})", "success")
            except Exception as e:
                show_notification(f"{i18n.get_text('export_error')}: {str(e)}", "error")

    return language, model
uv.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|