AnmolManocha committed
Commit 4f242c9 · 1 Parent(s): fb7b0da
Files changed (12)
  1. .gitignore +124 -0
  2. LICENSE +201 -0
  3. README.md +140 -12
  4. docs/tools.md +36 -0
  5. gpt4tools.py +1224 -0
  6. images/overview.png +0 -0
  7. llama.py +175 -0
  8. lora_finetune.py +283 -0
  9. requirements.txt +41 -0
  10. templates/gpt4tools.json +6 -0
  11. utils/__init__.py +0 -0
  12. utils/prompter.py +51 -0
.gitignore ADDED
@@ -0,0 +1,124 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ *.egg-info/
24
+ .installed.cfg
25
+ *.egg
26
+ MANIFEST
27
+
28
+ # PyInstaller
29
+ # Usually these files are written by a python script from a template
30
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
31
+ *.manifest
32
+ *.spec
33
+
34
+ # Installer logs
35
+ pip-log.txt
36
+ pip-delete-this-directory.txt
37
+
38
+ # Unit test / coverage reports
39
+ htmlcov/
40
+ .tox/
41
+ .coverage
42
+ .coverage.*
43
+ .cache
44
+ nosetests.xml
45
+ coverage.xml
46
+ *.cover
47
+ .hypothesis/
48
+ .pytest_cache/
49
+
50
+ # Translations
51
+ *.mo
52
+ *.pot
53
+
54
+ # Django stuff:
55
+ *.log
56
+ local_settings.py
57
+ db.sqlite3
58
+
59
+ # Flask stuff:
60
+ instance/
61
+ .webassets-cache
62
+
63
+ # Scrapy stuff:
64
+ .scrapy
65
+
66
+ # Sphinx documentation
67
+ docs/en/_build/
68
+ docs/zh_cn/_build/
69
+
70
+ # PyBuilder
71
+ target/
72
+
73
+ # Jupyter Notebook
74
+ .ipynb_checkpoints
75
+
76
+ # pyenv
77
+ .python-version
78
+
79
+ # celery beat schedule file
80
+ celerybeat-schedule
81
+
82
+ # SageMath parsed files
83
+ *.sage.py
84
+
85
+ # Environments
86
+ .env
87
+ .venv
88
+ env/
89
+ venv/
90
+ ENV/
91
+ env.bak/
92
+ venv.bak/
93
+
94
+ # Spyder project settings
95
+ .spyderproject
96
+ .spyproject
97
+
98
+ # Rope project settings
99
+ .ropeproject
100
+
101
+ # mkdocs documentation
102
+ /site
103
+
104
+ # mypy
105
+ .mypy_cache/
106
+
107
+ data/
108
+ data
109
+ .vscode
110
+ .idea
111
+ .DS_Store
112
+
113
+ # custom
114
+ *.pkl
115
+ *.pkl.json
116
+ *.log.json
117
+ docs/modelzoo_statistics.md
118
+ mmdet/.mim
119
+ work_dirs/
120
+
121
+ # Pytorch
122
+ *.pth
123
+ *.py~
124
+ *.sh~
LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md CHANGED
@@ -1,12 +1,140 @@
1
- ---
2
- title: GPT4Tools
3
- emoji: 🐠
4
- colorFrom: red
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.28.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
+ # GPT4Tools: Teaching LLM to Use Tools via Self-instruction
2
+
3
+ [Lin Song](http://linsong.info/), [Yanwei Li](https://yanwei-li.com/), [Rui Yang](https://github.com/Yangr116), Sijie Zhao, [Yixiao Ge](https://geyixiao.com/), [Ying Shan](https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=en)
4
+
5
+ GPT4Tools is a centralized system that can control multiple visual foundation models.
6
+ It is based on Vicuna (LLaMA) and 71K self-built instruction-following data.
7
+ By analyzing the language content, GPT4Tools is capable of automatically deciding, controlling, and utilizing different visual foundation models, allowing the user to interact with images during a conversation.
8
+ With this approach, GPT4Tools provides a seamless and efficient solution to fulfill various image-related requirements in a conversation.
9
+ Different from previous work, we enable users to teach their own LLM to use tools through simple refinement via self-instruction and LoRA.
10
+
11
+ <a href='https://gpt4tools.github.io'><img src='https://img.shields.io/badge/Project-Page-Green'></a> <a href='https://huggingface.co/stevengrove/gpt4tools-vicuna-13b-lora'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Model-blue'></a> [![YouTube](https://badges.aleen42.com/src/youtube.svg)](https://youtu.be/Qrj94ibQIT8) [![arXiv](https://img.shields.io/badge/arXiv-Paper-<COLOR>.svg)]()
12
+
13
+ ## Updates
14
+
15
+ * 🔥 We have released pretrained **GPT4Tools** models with <strong><font color="#008AD7">Vicuna-13B</font></strong> and the dataset for <strong><font color="#008AD7">self-instruction</font></strong>. Check out the [project page](https://gpt4tools.github.io/) and [demo](https://7db8018df6b6879722.gradio.live).
16
+
17
+ ## Demo
18
+ We provide some selected examples of using GPT4Tools in this section. More examples can be found on our [project page](https://gpt4tools.github.io). Feel free to try our online [demo](https://7db8018df6b6879722.gradio.live)!
19
+
20
+ <div align=center>
21
+ <img width="80%" src="demos/demo.gif"/>
22
+ </div>
23
+
24
+ | | |
25
+ :-------------------------:|:-------------------------:
26
+ ![segment](demos/demo_seg.png) | ![detect kps](demos/demo_kps.png)
27
+ ![solve problem](demos/demo_explain.png) | ![style transfer](demos/demo_style.png)
28
+
29
+ ## Dataset
30
+ | **Data file name** | **Size** | OneDrive | Google Drive |
31
+ |:------------------:|:--------:| :--------: | :---------:|
32
+ | gpt4tools_71k.json | 229 MB | [link](https://1drv.ms/u/s!AqPQkBZ4aeVnhRdryHC9b1NtWJpZ?e=ZHBCqd) | [link](https://drive.google.com/file/d/1JKIT-Or1of7TJuWvmrJpPoOx0cLdcWry/view?usp=share_link)|
33
+
34
+ ```gpt4tools_71k.json``` contains the 71K instruction-following samples we used for fine-tuning the GPT4Tools model.
35
+
36
+ The data collection process is illustrated below:
37
+
38
+ We fed GPT-3.5 with captions from 3K images and descriptions of 22 visual tasks. This produced 66K instructions, each corresponding to a specific visual task and a visual foundation model (tool). Subsequently, we eliminated duplicate instructions and retained 41K valid instructions. To teach the model to utilize tools in a predefined manner, we followed the prompt format used in Visual ChatGPT and converted these instructions into a conversational format. Concurrently, we generated negative data without tool usage by randomly sampling 3K instructions from [`alpaca_gpt4_data`](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM/blob/main/data/alpaca_gpt4_data.json) and converting them to the defined format. Using the resulting 71K instructions, we fine-tuned Vicuna with LoRA to obtain GPT4Tools, which can automatically decide, control, and utilize distinct tools in a conversation.
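+
+ As a quick sanity check, the released file can be inspected with a few lines of Python (a minimal sketch; the exact record schema is not documented here, so print the keys rather than assuming field names):
+
+ ```
+ import json
+
+ # Minimal sketch: inspect the released instruction-following data.
+ with open('gpt4tools_71k.json') as f:
+     samples = json.load(f)
+
+ print(len(samples))       # expected to be roughly 71K samples
+ print(samples[0].keys())  # check the actual schema before training
+ ```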
39
+
40
+
41
+ ## Models
42
+ GPT4Tools mainly contains three parts: an LLM for instruction, LoRA for adaptation, and a Visual Agent for the provided functions.
43
+ It is a flexible system that can be easily extended to support more tools and functions.
44
+ For example, users can replace the existing LLM or tools with their own models, or add new tools to the system.
45
+ The only thing needed is to finetune the LoRA with the provided instructions, which teaches the LLM to use the provided tools (a sketch of adding a new tool follows the overview figure below).
46
+
47
+ ![image](images/overview.png)
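+
+ For illustration, a new tool can follow the same pattern as the built-in ones in ```gpt4tools.py```: a class with an ```inference``` method decorated by ```@prompts```. The sketch below is hypothetical (the ```ImageFlipping``` tool does not exist in this repository) and assumes it is defined inside ```gpt4tools.py``` so that ```prompts``` and ```get_new_image_name``` are in scope:
+
+ ```
+ from PIL import Image, ImageOps
+
+ class ImageFlipping:
+     def __init__(self, device):
+         self.device = device  # kept for interface consistency; unused here
+
+     @prompts(name="Flip Image Horizontally",
+              description="useful when you want to mirror an image horizontally. "
+                          "The input to this tool should be a string, representing the image_path")
+     def inference(self, inputs):
+         image = Image.open(inputs)
+         flipped = ImageOps.mirror(image)
+         updated_image_path = get_new_image_name(inputs, func_name="flip")
+         flipped.save(updated_image_path)
+         return updated_image_path
+ ```
+
+ Such a tool could then be requested at launch time with ```--load "ImageFlipping_cpu"```, following the same naming convention as the existing tools.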
48
+
49
+ ### Preparation
50
+
51
+
52
+ ```
53
+ git clone https://github.com/stevengrove/GPT4Tools
54
+ cd GPT4Tools
55
+ pip install -r requirements.txt
56
+ ```
57
+ * If bitsandbytes doesn't work, [install it from source.](https://github.com/TimDettmers/bitsandbytes/blob/main/compile_from_source.md) Windows users can follow [these instructions](https://github.com/tloen/alpaca-lora/issues/17).
58
+
59
+
60
+ ### Weights
61
+ GPT4Tools is based on Vicuna. We release the LoRA weights of GPT4Tools to comply with the LLaMA model license. You can merge our LoRA weights with the Vicuna weights to obtain the GPT4Tools weights (an optional merging sketch follows the steps below).
62
+
63
+ Steps:
64
+ 1. Get the original LLaMA weights in the Hugging Face format from [here](https://huggingface.co/docs/transformers/main/model_doc/llama).
65
+ 2. Use [FastChat](https://github.com/lm-sys/FastChat/blob/main/README.md) to get the Vicuna weights by applying [the delta weights](https://huggingface.co/lmsys); for more details, please check [here](https://github.com/lm-sys/FastChat#vicuna-weights).
66
+ 3. Get the LoRA weights of GPT4Tools ([Hugging Face](https://huggingface.co/stevengrove/gpt4tools-vicuna-13b-lora), [OneDrive](https://1drv.ms/f/s!AqPQkBZ4aeVnhRzM69NOXLyG8cTY?e=apmpyQ), or [Google Drive](https://drive.google.com/drive/folders/1ebUINGR0QzNL-4hoKl19-6D_5rfeWTPD?usp=share_link)).
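+
+ Optionally, the LoRA weights can be merged into the Vicuna base model ahead of time. The following is a minimal PEFT-based sketch (not an official script; paths are placeholders), useful if you prefer a single merged checkpoint instead of passing ```--lora_model``` at runtime:
+
+ ```
+ import torch
+ from transformers import LlamaForCausalLM, LlamaTokenizer
+ from peft import PeftModel
+
+ base = LlamaForCausalLM.from_pretrained("<path_to_vicuna_with_tokenizer>",
+                                         torch_dtype=torch.float16)
+ merged = PeftModel.from_pretrained(base, "<path_to_lora_weights>").merge_and_unload()
+ merged.save_pretrained("<output_dir>")
+
+ tokenizer = LlamaTokenizer.from_pretrained("<path_to_vicuna_with_tokenizer>")
+ tokenizer.save_pretrained("<output_dir>")
+ ```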
67
+
68
+ ### Tools
69
+ GPT4Tools supports 22 tools; for more details, please check [tools.md](docs/tools.md).
70
+ When a tool is used for the first time, its weights need to be downloaded. If you don't want to store them in the default cache, please set the shell environment variables:
71
+ ```
72
+ export TRANSFORMERS_CACHE=${your_transformers_cache}
73
+ export HUGGINGFACE_HUB_CACHE=${your_diffusers_cache}
74
+ ```
75
+ For SAM (segmenting tools) and GroundingDINO (Text2Box tools):
76
+ ```
77
+ export checkpoints=${your_checkpoints_cache}
78
+ # or
79
+ ln -s ${your_checkpoints_path} checkpoints
80
+ ```
81
+
82
+ ### Serving with Web GUI
83
+ To launch a Gradio interface on your own device:
84
+ ```
85
+ # Recommended configuration for 1 GPU
86
+ python gpt4tools.py \
87
+ --base_model <path_to_vicuna_with_tokenizer> \
88
+ --lora_model <path_to_lora_weights> \
89
+ --llm_device "cpu" \
90
+ --load "Text2Box_cuda:0,Segmenting_cuda:0,Inpainting_cuda:0,ImageCaptioning_cuda:0"
91
+ ```
92
+
93
+ ```
94
+ # Recommended configuration for 4 GPUs
95
+ python gpt4tools.py \
96
+ --base_model <path_to_vicuna_with_tokenizer> \
97
+ --lora_model <path_to_lora_weights> \
98
+ --llm_device "cuda:3" \
99
+ --load "Text2Box_cuda:0,Segmenting_cuda:0,Inpainting_cuda:0,ImageCaptioning_cuda:0,
100
+ Text2Image_cuda:1,VisualQuestionAnswering_cuda:1,InstructPix2Pix_cuda:2,
101
+ SegText2Image_cuda:2,Image2Pose_cpu,PoseText2Image_cuda:2"
102
+ ```
103
+ You can customize which tools are used by specifying ```{tools_name}_{devices}``` after the ```--load``` argument of ```gpt4tools.py```. The available ```tools_name``` values are listed in [tools.md](./docs/tools.md).
104
+
105
+ ### Finetuning with LoRA
106
+
107
+ ```
108
+ # Training with 8 GPUs
109
+ torchrun --nproc_per_node=8 --master_port=29005 lora_finetune.py \
110
+ --base_model <path_to_vicuna_with_tokenizer> \
111
+ --data_path <path_to_gpt4tools_71k.json> \
112
+ --output_dir output/gpt4tools \
113
+ --prompt_template_name gpt4tools \
114
+ --num_epochs 6 \
115
+ --batch_size 512 \
116
+ --cutoff_len 2048 \
117
+ --group_by_length \
118
+ --lora_target_modules '[q_proj,k_proj,v_proj,o_proj]' \
119
+ --lora_r 16 \
120
+ --micro_batch_size=8
121
+ ```
122
+
123
+ | Hyperparameter | Global Batch Size | Learning rate | Max length | Weight decay | LoRA attention dimension (lora_r) | LoRA scaling alpha (lora_alpha) | LoRA dropout (lora_dropout) | Modules to apply LoRA (lora_target_modules) |
124
+ |:--------------:|:-----------------:|:-------------:|:----------:|:------------:|:---------------------------------:|:----------:|:------------:|:-----------------------------:|
125
+ | GPT4Tools & Vicuna-13B | 512 | 3e-4 | 2048 | 0.0 | 16 | 16 | 0.05 | [q_proj,k_proj,v_proj,o_proj] |
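+
+ For reference, these LoRA hyperparameters correspond to a PEFT configuration along the following lines (a sketch mirroring the table; the actual training setup lives in ```lora_finetune.py```, and the ```bias```/```task_type``` values are typical choices, not taken from the table):
+
+ ```
+ from peft import LoraConfig
+
+ lora_config = LoraConfig(
+     r=16,              # LoRA attention dimension (lora_r)
+     lora_alpha=16,     # LoRA scaling alpha (lora_alpha)
+     lora_dropout=0.05, # LoRA dropout (lora_dropout)
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
+     bias="none",
+     task_type="CAUSAL_LM",
+ )
+ ```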
126
+
127
+
128
+ ## Acknowledgement
129
+ * [VisualChatGPT](https://github.com/microsoft/TaskMatrix): It connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting.
130
+ * [Vicuna](https://github.com/lm-sys/FastChat): The language ability of Vicuna is fantastic, and it is open-source!
131
+ * [Alpaca-LoRA](https://github.com/tloen/alpaca-lora): Instruct-tune LLaMA on consumer hardware.
132
+
133
+ If you're using our GPT4Tools in your research or applications, please cite using this BibTeX:
134
+ ```
135
+ @misc{gpt4tools,
136
+ title = {GPT4Tools: Teaching LLM to Use Tools via Self-instruction},
137
+ author = {Lin Song and Yanwei Li and Rui Yang and Sijie Zhao and Yixiao Ge and Ying Shan},
138
+ year = {2023},
139
+ }
140
+ ```
docs/tools.md ADDED
@@ -0,0 +1,36 @@
1
+ GPT4Tools supports 22 tools, including:
2
+
3
+ | No. | Tools Name | Function |
4
+ |-----|:-----------------------:|:------------------------------------------------------------------------------:|
5
+ | 1 | InstructPix2Pix | Change the style of the image to be like the text. |
6
+ | 2 | Text2Image | Generate an image from an input text. |
7
+ | 3 | ImageCaptioning | Describe the input image. |
8
+ | 4 | Image2Canny | Detect the edges of the image. |
9
+ | 5 | CannyText2Image | Generate a new real image from both the user description and a canny image. |
10
+ | 6 | Image2Line | Detect the straight line of the image. |
11
+ | 7 | Image2Hed | Detect the soft hed boundary of the image. |
12
+ | 8 | HedText2Image | Generate a new real image from both the user description and a soft hed boundary image. |
13
+ | 9 | Image2Scribble | Generate a scribble of the image. |
14
+ | 10 | ScribbleText2Image | Generate a new real image from both the user description and a scribble image. |
15
+ | 11 | Image2Pose | Detect the human pose of the image. |
16
+ | 12 | PoseText2Image | Generate a new real image from both the user description and a human pose image. |
17
+ | 13 | SegText2Image | Generate a new real image from both the user description and segmentations. |
18
+ | 14 | Image2Depth | Detect the depth of the image. |
19
+ | 15 | DepthText2Image | Generate a new real image from both the user description and depth image. |
20
+ | 16 | Image2Normal | Detect the normal map of the image. |
21
+ | 17 | NormalText2Image | Generate a new real image from both the user description and normal map. |
22
+ | 18 | VisualQuestionAnswering | Answer a question based on an image. |
23
+ | 19 | Segmenting | Segment all parts of the image. |
24
+ | 20 | Text2Box | Detect or locate given objects in the picture. |
25
+ | 21 | ObjectSegmenting | Segment specified objects in the picture. |
26
+ | 22 | ImageEditing | Remove an object or something from the photo. |
27
+
28
+ You can customize which tools are used by specifying ```{tools_name}_{devices}``` after the ```--load``` argument of ```gpt4tools.py```. For example, to enable ```Text2Box```, ```Segmenting```, and ```ImageCaptioning```:
29
+ ```
30
+ python gpt4tools.py \
31
+ --base_model <path_to_vicuna_with_tokenizer> \
32
+ --lora_model <path_to_lora_weights> \
33
+ --llm_device "cpu" \
34
+ --load "Text2Box_cuda:0,Segmenting_cuda:0,ImageCaptioning_cuda:0"
35
+ ```
36
+ More tools will be supported in the future!
gpt4tools.py ADDED
@@ -0,0 +1,1224 @@
1
+ # coding: utf-8
2
+ import os
3
+ import gradio as gr
4
+ import random
5
+ import torch
6
+ import cv2
7
+ import re
8
+ import uuid
9
+ from PIL import Image, ImageDraw, ImageOps, ImageFont
10
+ import math
11
+ import numpy as np
12
+ import argparse
13
+ import inspect
14
+ import tempfile
15
+ from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
16
+ from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
17
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
18
+
19
+ from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline
20
+ from diffusers import EulerAncestralDiscreteScheduler
21
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
22
+ from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector
23
+
24
+ from langchain.agents.initialize import initialize_agent
25
+ from langchain.agents.tools import Tool
26
+ from langchain.chains.conversation.memory import ConversationBufferMemory
27
+
28
+ # Grounding DINO
29
+ import groundingdino.datasets.transforms as T
30
+ from groundingdino.models import build_model
31
+ from groundingdino.util import box_ops
32
+ from groundingdino.util.slconfig import SLConfig
33
+ from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
34
+
35
+ # segment anything
36
+ from segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator
37
+ import cv2
38
+ import numpy as np
39
+ import matplotlib.pyplot as plt
40
+ import wget
41
+
42
+ from llama import Llama
43
+
44
+
45
+ GPT4TOOLS_PREFIX = """GPT4Tools can handle various text and visual tasks, such as answering questions and providing in-depth explanations and discussions. It generates human-like text and uses tools to indirectly understand images. When referring to images, GPT4Tools follows strict file name rules. To complete visual tasks, GPT4Tools uses tools and stays loyal to observation outputs. Users can provide new images to GPT4Tools with a description, but tools must be used for subsequent tasks.
46
+ TOOLS:
47
+ ------
48
+
49
+ GPT4Tools has access to the following tools:"""
50
+
51
+
52
+ GPT4TOOLS_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
53
+
54
+ ```
55
+ Thought: Do I need to use a tool? Yes
56
+ Action: the action to take, should be one of [{tool_names}]
57
+ Action Input: the input to the action
58
+ Observation: the result of the action
59
+ ```
60
+
61
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
62
+
63
+ ```
64
+ Thought: Do I need to use a tool? No
65
+ {ai_prefix}: [your response here]
66
+ ```
67
+ """
68
+
69
+ GPT4TOOLS_SUFFIX = """Follow file name rules and do not fake non-existent file names. Remember to provide the image file name loyally from the last tool observation.
70
+
71
+ Previous conversation:
72
+ {chat_history}
73
+
74
+ New input: {input}
75
+ GPT4Tools needs to use tools to observe images, not directly imagine them. Thoughts and observations in the conversation are only visible to GPT4Tools. When answering human questions, repeat important information. Let's think step by step.
76
+ {agent_scratchpad}"""
77
+
78
+
79
+ os.makedirs('image', exist_ok=True)
80
+
81
+
82
+ def seed_everything(seed):
83
+ random.seed(seed)
84
+ np.random.seed(seed)
85
+ torch.manual_seed(seed)
86
+ torch.cuda.manual_seed_all(seed)
87
+ return seed
88
+
89
+
90
+ def prompts(name, description):
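+ # Attach a tool name and description to the decorated inference method; these
+ # attributes are read later when the method is registered (e.g., as a langchain Tool).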
91
+ def decorator(func):
92
+ func.name = name
93
+ func.description = description
94
+ return func
95
+
96
+ return decorator
97
+
98
+
99
+ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
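+ # Blend old_image into the center of new_image: build a weight mask that is 1 in the
+ # interior and falls off toward the borders (Gaussian-shaped corners, linear edge ramps),
+ # then composite the two so the pasted region fades smoothly into its surroundings.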
100
+ new_size = new_image.size
101
+ old_size = old_image.size
102
+ easy_img = np.array(new_image)
103
+ gt_img_array = np.array(old_image)
104
+ pos_w = (new_size[0] - old_size[0]) // 2
105
+ pos_h = (new_size[1] - old_size[1]) // 2
106
+
107
+ kernel_h = cv2.getGaussianKernel(old_size[1], old_size[1] * sigma)
108
+ kernel_w = cv2.getGaussianKernel(old_size[0], old_size[0] * sigma)
109
+ kernel = np.multiply(kernel_h, np.transpose(kernel_w))
110
+
111
+ kernel[steps:-steps, steps:-steps] = 1
112
+ kernel[:steps, :steps] = kernel[:steps, :steps] / kernel[steps - 1, steps - 1]
113
+ kernel[:steps, -steps:] = kernel[:steps, -steps:] / kernel[steps - 1, -(steps)]
114
+ kernel[-steps:, :steps] = kernel[-steps:, :steps] / kernel[-steps, steps - 1]
115
+ kernel[-steps:, -steps:] = kernel[-steps:, -steps:] / kernel[-steps, -steps]
116
+ kernel = np.expand_dims(kernel, 2)
117
+ kernel = np.repeat(kernel, 3, 2)
118
+
119
+ weight = np.linspace(0, 1, steps)
120
+ top = np.expand_dims(weight, 1)
121
+ top = np.repeat(top, old_size[0] - 2 * steps, 1)
122
+ top = np.expand_dims(top, 2)
123
+ top = np.repeat(top, 3, 2)
124
+
125
+ weight = np.linspace(1, 0, steps)
126
+ down = np.expand_dims(weight, 1)
127
+ down = np.repeat(down, old_size[0] - 2 * steps, 1)
128
+ down = np.expand_dims(down, 2)
129
+ down = np.repeat(down, 3, 2)
130
+
131
+ weight = np.linspace(0, 1, steps)
132
+ left = np.expand_dims(weight, 0)
133
+ left = np.repeat(left, old_size[1] - 2 * steps, 0)
134
+ left = np.expand_dims(left, 2)
135
+ left = np.repeat(left, 3, 2)
136
+
137
+ weight = np.linspace(1, 0, steps)
138
+ right = np.expand_dims(weight, 0)
139
+ right = np.repeat(right, old_size[1] - 2 * steps, 0)
140
+ right = np.expand_dims(right, 2)
141
+ right = np.repeat(right, 3, 2)
142
+
143
+ kernel[:steps, steps:-steps] = top
144
+ kernel[-steps:, steps:-steps] = down
145
+ kernel[steps:-steps, :steps] = left
146
+ kernel[steps:-steps, -steps:] = right
147
+
148
+ pt_gt_img = easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]]
149
+ gaussian_gt_img = kernel * gt_img_array + (1 - kernel) * pt_gt_img # gt img with blur img
150
+ gaussian_gt_img = gaussian_gt_img.astype(np.int64)
151
+ easy_img[pos_h:pos_h + old_size[1], pos_w:pos_w + old_size[0]] = gaussian_gt_img
152
+ gaussian_img = Image.fromarray(easy_img)
153
+ return gaussian_img
154
+
155
+
156
+ def cut_dialogue_history(history_memory, keep_last_n_paragraphs=1):
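+ # Trim the conversation memory to the last `keep_last_n_paragraphs` 'Human:' exchanges
+ # so the prompt passed back to the LLM stays bounded.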
157
+ if history_memory is None or len(history_memory) == 0:
158
+ return history_memory
159
+ paragraphs = history_memory.split('Human:')
160
+ if len(paragraphs) <= keep_last_n_paragraphs:
161
+ return history_memory
162
+ return 'Human:' + 'Human:'.join(paragraphs[-keep_last_n_paragraphs:])
163
+
164
+
165
+ def get_new_image_name(org_img_name, func_name="update"):
166
+ head_tail = os.path.split(org_img_name)
167
+ head = head_tail[0]
168
+ tail = head_tail[1]
169
+ new_file_name = f'{str(uuid.uuid4())[:8]}.png'
170
+ return os.path.join(head, new_file_name)
171
+
172
+
173
+ class InstructPix2Pix:
174
+ def __init__(self, device):
175
+ print(f"Initializing InstructPix2Pix to {device}")
176
+ self.device = device
177
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
178
+ self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix",
179
+ safety_checker=None,
180
+ torch_dtype=self.torch_dtype).to(device)
181
+ self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
182
+
183
+ @prompts(name="Instruct Image Using Text",
184
+ description="useful when you want to the style of the image to be like the text. "
185
+ "like: make it look like a painting. or make it like a robot. "
186
+ "The input to this tool should be a comma separated string of two, "
187
+ "representing the image_path and the text. ")
188
+ def inference(self, inputs):
189
+ """Change style of image."""
190
+ print("===>Starting InstructPix2Pix Inference")
191
+ image_path, text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
192
+ original_image = Image.open(image_path)
193
+ image = self.pipe(text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2).images[0]
194
+ updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
195
+ image.save(updated_image_path)
196
+ print(f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text: {text}, "
197
+ f"Output Image: {updated_image_path}")
198
+ return updated_image_path
199
+
200
+
201
+ class Text2Image:
202
+ def __init__(self, device):
203
+ print(f"Initializing Text2Image to {device}")
204
+ self.device = device
205
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
206
+ self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",
207
+ torch_dtype=self.torch_dtype)
208
+ self.pipe.to(device)
209
+ self.a_prompt = 'best quality, extremely detailed'
210
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
211
+ 'fewer digits, cropped, worst quality, low quality'
212
+
213
+ @prompts(name="Generate Image From User Input Text",
214
+ description="useful when you want to generate an image from a user input text and save it to a file. "
215
+ "like: generate an image of an object or something, or generate an image that includes some objects. "
216
+ "The input to this tool should be a string, representing the text used to generate image. ")
217
+ def inference(self, text):
218
+ image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
219
+ prompt = text + ', ' + self.a_prompt
220
+ image = self.pipe(prompt, negative_prompt=self.n_prompt).images[0]
221
+ image.save(image_filename)
222
+ print(
223
+ f"\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}")
224
+ return image_filename
225
+
226
+
227
+ class ImageCaptioning:
228
+ def __init__(self, device):
229
+ print(f"Initializing ImageCaptioning to {device}")
230
+ self.device = device
231
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
232
+ self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
233
+ self.model = BlipForConditionalGeneration.from_pretrained(
234
+ "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype).to(self.device)
235
+
236
+ @prompts(name="Get Photo Description",
237
+ description="useful when you want to know what is inside the photo. receives image_path as input. "
238
+ "The input to this tool should be a string, representing the image_path. ")
239
+ def inference(self, image_path):
240
+ inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device, self.torch_dtype)
241
+ out = self.model.generate(**inputs)
242
+ captions = self.processor.decode(out[0], skip_special_tokens=True)
243
+ print(f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}")
244
+ return captions
245
+
246
+
247
+ class Image2Canny:
248
+ def __init__(self, device):
249
+ print("Initializing Image2Canny")
250
+ self.low_threshold = 100
251
+ self.high_threshold = 200
252
+
253
+ @prompts(name="Edge Detection On Image",
254
+ description="useful when you want to detect the edge of the image. "
255
+ "like: detect the edges of this image, or canny detection on image, "
256
+ "or perform edge detection on this image, or detect the canny image of this image. "
257
+ "The input to this tool should be a string, representing the image_path")
258
+ def inference(self, inputs):
259
+ image = Image.open(inputs)
260
+ image = np.array(image)
261
+ canny = cv2.Canny(image, self.low_threshold, self.high_threshold)
262
+ canny = canny[:, :, None]
263
+ canny = np.concatenate([canny, canny, canny], axis=2)
264
+ canny = Image.fromarray(canny)
265
+ updated_image_path = get_new_image_name(inputs, func_name="edge")
266
+ canny.save(updated_image_path)
267
+ print(f"\nProcessed Image2Canny, Input Image: {inputs}, Output Text: {updated_image_path}")
268
+ return updated_image_path
269
+
270
+
271
+ class CannyText2Image:
272
+ def __init__(self, device):
273
+ print(f"Initializing CannyText2Image to {device}")
274
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
275
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-canny",
276
+ torch_dtype=self.torch_dtype)
277
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
278
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
279
+ torch_dtype=self.torch_dtype)
280
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
281
+ self.pipe.to(device)
282
+ self.seed = -1
283
+ self.a_prompt = 'best quality, extremely detailed'
284
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
285
+ 'fewer digits, cropped, worst quality, low quality'
286
+
287
+ @prompts(name="Generate Image Condition On Canny Image",
288
+ description="useful when you want to generate a new real image from both the user description and a canny image."
289
+ " like: generate a real image of a object or something from this canny image,"
290
+ " or generate a new real image of a object or something from this edge image. "
291
+ "The input to this tool should be a comma separated string of two, "
292
+ "representing the image_path and the user description. ")
293
+ def inference(self, inputs):
294
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
295
+ image = Image.open(image_path)
296
+ self.seed = random.randint(0, 65535)
297
+ seed_everything(self.seed)
298
+ prompt = f'{instruct_text}, {self.a_prompt}'
299
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
300
+ guidance_scale=9.0).images[0]
301
+ updated_image_path = get_new_image_name(image_path, func_name="canny2image")
302
+ image.save(updated_image_path)
303
+ print(f"\nProcessed CannyText2Image, Input Canny: {image_path}, Input Text: {instruct_text}, "
304
+ f"Output Text: {updated_image_path}")
305
+ return updated_image_path
306
+
307
+
308
+ class Image2Line:
309
+ def __init__(self, device):
310
+ print("Initializing Image2Line")
311
+ self.detector = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
312
+
313
+ @prompts(name="Line Detection On Image",
314
+ description="useful when you want to detect the straight line of the image. "
315
+ "like: detect the straight lines of this image, or straight line detection on image, "
316
+ "or perform straight line detection on this image, or detect the straight line image of this image. "
317
+ "The input to this tool should be a string, representing the image_path")
318
+ def inference(self, inputs):
319
+ image = Image.open(inputs)
320
+ mlsd = self.detector(image)
321
+ updated_image_path = get_new_image_name(inputs, func_name="line-of")
322
+ mlsd.save(updated_image_path)
323
+ print(f"\nProcessed Image2Line, Input Image: {inputs}, Output Line: {updated_image_path}")
324
+ return updated_image_path
325
+
326
+
327
+ class LineText2Image:
328
+ def __init__(self, device):
329
+ print(f"Initializing LineText2Image to {device}")
330
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
331
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-mlsd",
332
+ torch_dtype=self.torch_dtype)
333
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
334
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
335
+ torch_dtype=self.torch_dtype
336
+ )
337
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
338
+ self.pipe.to(device)
339
+ self.seed = -1
340
+ self.a_prompt = 'best quality, extremely detailed'
341
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
342
+ 'fewer digits, cropped, worst quality, low quality'
343
+
344
+ @prompts(name="Generate Image Condition On Line Image",
345
+ description="useful when you want to generate a new real image from both the user description "
346
+ "and a straight line image. "
347
+ "like: generate a real image of a object or something from this straight line image, "
348
+ "or generate a new real image of a object or something from this straight lines. "
349
+ "The input to this tool should be a comma separated string of two, "
350
+ "representing the image_path and the user description. ")
351
+ def inference(self, inputs):
352
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
353
+ image = Image.open(image_path)
354
+ self.seed = random.randint(0, 65535)
355
+ seed_everything(self.seed)
356
+ prompt = f'{instruct_text}, {self.a_prompt}'
357
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
358
+ guidance_scale=9.0).images[0]
359
+ updated_image_path = get_new_image_name(image_path, func_name="line2image")
360
+ image.save(updated_image_path)
361
+ print(f"\nProcessed LineText2Image, Input Line: {image_path}, Input Text: {instruct_text}, "
362
+ f"Output Text: {updated_image_path}")
363
+ return updated_image_path
364
+
365
+
366
+ class Image2Hed:
367
+ def __init__(self, device):
368
+ print("Initializing Image2Hed")
369
+ self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet')
370
+
371
+ @prompts(name="Hed Detection On Image",
372
+ description="useful when you want to detect the soft hed boundary of the image. "
373
+ "like: detect the soft hed boundary of this image, or hed boundary detection on image, "
374
+ "or perform hed boundary detection on this image, or detect soft hed boundary image of this image. "
375
+ "The input to this tool should be a string, representing the image_path")
376
+ def inference(self, inputs):
377
+ image = Image.open(inputs)
378
+ hed = self.detector(image)
379
+ updated_image_path = get_new_image_name(inputs, func_name="hed-boundary")
380
+ hed.save(updated_image_path)
381
+ print(f"\nProcessed Image2Hed, Input Image: {inputs}, Output Hed: {updated_image_path}")
382
+ return updated_image_path
383
+
384
+
385
+ class HedText2Image:
386
+ def __init__(self, device):
387
+ print(f"Initializing HedText2Image to {device}")
388
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
389
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-hed",
390
+ torch_dtype=self.torch_dtype)
391
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
392
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
393
+ torch_dtype=self.torch_dtype
394
+ )
395
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
396
+ self.pipe.to(device)
397
+ self.seed = -1
398
+ self.a_prompt = 'best quality, extremely detailed'
399
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
400
+ 'fewer digits, cropped, worst quality, low quality'
401
+
402
+ @prompts(name="Generate Image Condition On Soft Hed Boundary Image",
403
+ description="useful when you want to generate a new real image from both the user description "
404
+ "and a soft hed boundary image. "
405
+ "like: generate a real image of a object or something from this soft hed boundary image, "
406
+ "or generate a new real image of a object or something from this hed boundary. "
407
+ "The input to this tool should be a comma separated string of two, "
408
+ "representing the image_path and the user description")
409
+ def inference(self, inputs):
410
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
411
+ image = Image.open(image_path)
412
+ self.seed = random.randint(0, 65535)
413
+ seed_everything(self.seed)
414
+ prompt = f'{instruct_text}, {self.a_prompt}'
415
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
416
+ guidance_scale=9.0).images[0]
417
+ updated_image_path = get_new_image_name(image_path, func_name="hed2image")
418
+ image.save(updated_image_path)
419
+ print(f"\nProcessed HedText2Image, Input Hed: {image_path}, Input Text: {instruct_text}, "
420
+ f"Output Image: {updated_image_path}")
421
+ return updated_image_path
422
+
423
+
424
+ class Image2Scribble:
425
+ def __init__(self, device):
426
+ print("Initializing Image2Scribble")
427
+ self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet')
428
+
429
+ @prompts(name="Sketch Detection On Image",
430
+ description="useful when you want to generate a scribble of the image. "
431
+ "like: generate a scribble of this image, or generate a sketch from this image, "
432
+ "detect the sketch from this image. "
433
+ "The input to this tool should be a string, representing the image_path")
434
+ def inference(self, inputs):
435
+ image = Image.open(inputs)
436
+ scribble = self.detector(image, scribble=True)
437
+ updated_image_path = get_new_image_name(inputs, func_name="scribble")
438
+ scribble.save(updated_image_path)
439
+ print(f"\nProcessed Image2Scribble, Input Image: {inputs}, Output Scribble: {updated_image_path}")
440
+ return updated_image_path
441
+
442
+
443
+ class ScribbleText2Image:
444
+ def __init__(self, device):
445
+ print(f"Initializing ScribbleText2Image to {device}")
446
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
447
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-scribble",
448
+ torch_dtype=self.torch_dtype)
449
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
450
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
451
+ torch_dtype=self.torch_dtype
452
+ )
453
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
454
+ self.pipe.to(device)
455
+ self.seed = -1
456
+ self.a_prompt = 'best quality, extremely detailed'
457
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \
458
+ 'fewer digits, cropped, worst quality, low quality'
459
+
460
+ @prompts(name="Generate Image Condition On Sketch Image",
461
+ description="useful when you want to generate a new real image from both the user description and "
462
+ "a scribble image or a sketch image. "
463
+ "The input to this tool should be a comma separated string of two, "
464
+ "representing the image_path and the user description")
465
+ def inference(self, inputs):
466
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
467
+ image = Image.open(image_path)
468
+ self.seed = random.randint(0, 65535)
469
+ seed_everything(self.seed)
470
+ prompt = f'{instruct_text}, {self.a_prompt}'
471
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
472
+ guidance_scale=9.0).images[0]
473
+ updated_image_path = get_new_image_name(image_path, func_name="scribble2image")
474
+ image.save(updated_image_path)
475
+ print(f"\nProcessed ScribbleText2Image, Input Scribble: {image_path}, Input Text: {instruct_text}, "
476
+ f"Output Image: {updated_image_path}")
477
+ return updated_image_path
478
+
479
+
480
+ class Image2Pose:
481
+ def __init__(self, device):
482
+ print("Initializing Image2Pose")
483
+ self.detector = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
484
+
485
+ @prompts(name="Pose Detection On Image",
486
+ description="useful when you want to detect the human pose of the image. "
487
+ "like: generate human poses of this image, or generate a pose image from this image. "
488
+ "The input to this tool should be a string, representing the image_path")
489
+ def inference(self, inputs):
490
+ image = Image.open(inputs)
491
+ pose = self.detector(image)
492
+ updated_image_path = get_new_image_name(inputs, func_name="human-pose")
493
+ pose.save(updated_image_path)
494
+ print(f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose: {updated_image_path}")
495
+ return updated_image_path
496
+
497
+
498
+ class PoseText2Image:
499
+ def __init__(self, device):
500
+ print(f"Initializing PoseText2Image to {device}")
501
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
502
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-openpose",
503
+ torch_dtype=self.torch_dtype)
504
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
505
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
506
+ torch_dtype=self.torch_dtype)
507
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
508
+ self.pipe.to(device)
509
+ self.num_inference_steps = 20
510
+ self.seed = -1
511
+ self.unconditional_guidance_scale = 9.0
512
+ self.a_prompt = 'best quality, extremely detailed'
513
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
514
+ ' fewer digits, cropped, worst quality, low quality'
515
+
516
+ @prompts(name="Generate Image Condition On Pose Image",
517
+ description="useful when you want to generate a new real image from both the user description "
518
+ "and a human pose image. "
519
+ "like: generate a real image of a human from this human pose image, "
520
+ "or generate a new real image of a human from this pose. "
521
+ "The input to this tool should be a comma separated string of two, "
522
+ "representing the image_path and the user description")
523
+ def inference(self, inputs):
524
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
525
+ image = Image.open(image_path)
526
+ self.seed = random.randint(0, 65535)
527
+ seed_everything(self.seed)
528
+ prompt = f'{instruct_text}, {self.a_prompt}'
529
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
530
+ guidance_scale=9.0).images[0]
531
+ updated_image_path = get_new_image_name(image_path, func_name="pose2image")
532
+ image.save(updated_image_path)
533
+ print(f"\nProcessed PoseText2Image, Input Pose: {image_path}, Input Text: {instruct_text}, "
534
+ f"Output Image: {updated_image_path}")
535
+ return updated_image_path
536
+
537
+
538
+ class SegText2Image:
539
+ def __init__(self, device):
540
+ print(f"Initializing SegText2Image to {device}")
541
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
542
+ self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-seg",
543
+ torch_dtype=self.torch_dtype)
544
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
545
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
546
+ torch_dtype=self.torch_dtype)
547
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
548
+ self.pipe.to(device)
549
+ self.seed = -1
550
+ self.a_prompt = 'best quality, extremely detailed'
551
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
552
+ ' fewer digits, cropped, worst quality, low quality'
553
+
554
+ @prompts(name="Generate Image Condition On Segmentations",
555
+ description="useful when you want to generate a new real image from both the user description and segmentations. "
556
+ "like: generate a real image of a object or something from this segmentation image, "
557
+ "or generate a new real image of a object or something from these segmentations. "
558
+ "The input to this tool should be a comma separated string of two, "
559
+ "representing the image_path and the user description")
560
+ def inference(self, inputs):
561
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
562
+ image = Image.open(image_path)
563
+ self.seed = random.randint(0, 65535)
564
+ seed_everything(self.seed)
565
+ prompt = f'{instruct_text}, {self.a_prompt}'
566
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
567
+ guidance_scale=9.0).images[0]
568
+ updated_image_path = get_new_image_name(image_path, func_name="segment2image")
569
+ image.save(updated_image_path)
570
+ print(f"\nProcessed SegText2Image, Input Seg: {image_path}, Input Text: {instruct_text}, "
571
+ f"Output Image: {updated_image_path}")
572
+ return updated_image_path
573
+
574
+
575
+ class Image2Depth:
576
+ def __init__(self, device):
577
+ print("Initializing Image2Depth")
578
+ self.depth_estimator = pipeline('depth-estimation')
579
+
580
+ @prompts(name="Predict Depth On Image",
581
+ description="useful when you want to detect depth of the image. like: generate the depth from this image, "
582
+ "or detect the depth map on this image, or predict the depth for this image. "
583
+ "The input to this tool should be a string, representing the image_path")
584
+ def inference(self, inputs):
585
+ image = Image.open(inputs)
586
+ depth = self.depth_estimator(image)['depth']
587
+ depth = np.array(depth)
588
+ depth = depth[:, :, None]
589
+ depth = np.concatenate([depth, depth, depth], axis=2)
590
+ depth = Image.fromarray(depth)
591
+ updated_image_path = get_new_image_name(inputs, func_name="depth")
592
+ depth.save(updated_image_path)
593
+ print(f"\nProcessed Image2Depth, Input Image: {inputs}, Output Depth: {updated_image_path}")
594
+ return updated_image_path
595
+
596
+
597
+ class DepthText2Image:
598
+ def __init__(self, device):
599
+ print(f"Initializing DepthText2Image to {device}")
600
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
601
+ self.controlnet = ControlNetModel.from_pretrained(
602
+ "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=self.torch_dtype)
603
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
604
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
605
+ torch_dtype=self.torch_dtype)
606
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
607
+ self.pipe.to(device)
608
+ self.seed = -1
609
+ self.a_prompt = 'best quality, extremely detailed'
610
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
611
+ ' fewer digits, cropped, worst quality, low quality'
612
+
613
+ @prompts(name="Generate Image Condition On Depth",
614
+ description="useful when you want to generate a new real image from both the user description and depth image. "
615
+ "like: generate a real image of a object or something from this depth image, "
616
+ "or generate a new real image of a object or something from the depth map. "
617
+ "The input to this tool should be a comma separated string of two, "
618
+ "representing the image_path and the user description")
619
+ def inference(self, inputs):
620
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
621
+ image = Image.open(image_path)
622
+ self.seed = random.randint(0, 65535)
623
+ seed_everything(self.seed)
624
+ prompt = f'{instruct_text}, {self.a_prompt}'
625
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
626
+ guidance_scale=9.0).images[0]
627
+ updated_image_path = get_new_image_name(image_path, func_name="depth2image")
628
+ image.save(updated_image_path)
629
+ print(f"\nProcessed DepthText2Image, Input Depth: {image_path}, Input Text: {instruct_text}, "
630
+ f"Output Image: {updated_image_path}")
631
+ return updated_image_path
632
+
633
+
634
+ class Image2Normal:
635
+ def __init__(self, device):
636
+ print("Initializing Image2Normal")
637
+ self.depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas")
638
+ self.bg_threshold = 0.4
639
+
640
+ @prompts(name="Predict Normal Map On Image",
641
+ description="useful when you want to detect norm map of the image. "
642
+ "like: generate normal map from this image, or predict normal map of this image. "
643
+ "The input to this tool should be a string, representing the image_path")
644
+ def inference(self, inputs):
645
+ image = Image.open(inputs)
646
+ original_size = image.size
647
+ image = self.depth_estimator(image)['predicted_depth'][0]
648
+ image = image.numpy()
649
+ image_depth = image.copy()
650
+ image_depth -= np.min(image_depth)
651
+ image_depth /= np.max(image_depth)
652
+ x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3)
653
+ x[image_depth < self.bg_threhold] = 0
654
+ y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3)
655
+ y[image_depth < self.bg_threhold] = 0
656
+ z = np.ones_like(x) * np.pi * 2.0
657
+ image = np.stack([x, y, z], axis=2)
658
+ image /= np.sum(image ** 2.0, axis=2, keepdims=True) ** 0.5
659
+ image = (image * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
660
+ image = Image.fromarray(image)
661
+ image = image.resize(original_size)
662
+ updated_image_path = get_new_image_name(inputs, func_name="normal-map")
663
+ image.save(updated_image_path)
664
+ print(f"\nProcessed Image2Normal, Input Image: {inputs}, Output Depth: {updated_image_path}")
665
+ return updated_image_path
666
+
667
+
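The normal map above is approximated straight from the monocular depth prediction: Sobel gradients of the depth supply the x and y components, a constant z keeps the vectors pointing outward, and each pixel is then normalised to unit length. A self-contained sketch of that conversion (the threshold mirrors the class default):

    import cv2
    import numpy as np

    def depth_to_normal(depth, bg_threshold=0.4):
        """Turn an (H, W) depth array into an (H, W, 3) uint8 pseudo normal map."""
        d = depth.astype(np.float32)
        d_norm = (d - d.min()) / (d.max() - d.min() + 1e-8)   # scaled copy used only for masking
        x = cv2.Sobel(d, cv2.CV_32F, 1, 0, ksize=3)           # horizontal depth gradient
        y = cv2.Sobel(d, cv2.CV_32F, 0, 1, ksize=3)           # vertical depth gradient
        x[d_norm < bg_threshold] = 0                          # flatten the background
        y[d_norm < bg_threshold] = 0
        z = np.ones_like(x) * np.pi * 2.0                     # constant outward component
        normal = np.stack([x, y, z], axis=2)
        normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
        return (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)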
668
+ class NormalText2Image:
669
+ def __init__(self, device):
670
+ print(f"Initializing NormalText2Image to {device}")
671
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
672
+ self.controlnet = ControlNetModel.from_pretrained(
673
+ "fusing/stable-diffusion-v1-5-controlnet-normal", torch_dtype=self.torch_dtype)
674
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
675
+ "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None,
676
+ torch_dtype=self.torch_dtype)
677
+ self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config)
678
+ self.pipe.to(device)
679
+ self.seed = -1
680
+ self.a_prompt = 'best quality, extremely detailed'
681
+ self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \
682
+ ' fewer digits, cropped, worst quality, low quality'
683
+
684
+ @prompts(name="Generate Image Condition On Normal Map",
685
+ description="useful when you want to generate a new real image from both the user description and normal map. "
686
+ "like: generate a real image of a object or something from this normal map, "
687
+ "or generate a new real image of a object or something from the normal map. "
688
+ "The input to this tool should be a comma separated string of two, "
689
+ "representing the image_path and the user description")
690
+ def inference(self, inputs):
691
+ image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
692
+ image = Image.open(image_path)
693
+ self.seed = random.randint(0, 65535)
694
+ seed_everything(self.seed)
695
+ prompt = f'{instruct_text}, {self.a_prompt}'
696
+ image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt,
697
+ guidance_scale=9.0).images[0]
698
+ updated_image_path = get_new_image_name(image_path, func_name="normal2image")
699
+ image.save(updated_image_path)
700
+ print(f"\nProcessed NormalText2Image, Input Normal: {image_path}, Input Text: {instruct_text}, "
701
+ f"Output Image: {updated_image_path}")
702
+ return updated_image_path
703
+
704
+
705
+ class VisualQuestionAnswering:
706
+ def __init__(self, device):
707
+ print(f"Initializing VisualQuestionAnswering to {device}")
708
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
709
+ self.device = device
710
+ self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
711
+ self.model = BlipForQuestionAnswering.from_pretrained(
712
+ "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype).to(self.device)
713
+
714
+ @prompts(name="Answer Question About The Image",
715
+ description="useful when you need an answer for a question based on an image. "
716
+ "like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
717
+ "The input to this tool should be a comma separated string of two, representing the image_path and the question")
718
+ def inference(self, inputs):
719
+ image_path, question = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
720
+ raw_image = Image.open(image_path).convert('RGB')
721
+ inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device, self.torch_dtype)
722
+ out = self.model.generate(**inputs)
723
+ answer = self.processor.decode(out[0], skip_special_tokens=True)
724
+ print(f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
725
+ f"Output Answer: {answer}")
726
+ return answer
727
+
728
+
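A quick usage sketch of the BLIP-based tool above (the image path and question are placeholders):

    from gpt4tools import VisualQuestionAnswering
    vqa = VisualQuestionAnswering(device='cuda:0')
    # input format: "<image_path>, <question>"
    print(vqa.inference('image/cats_1234.png, how many cats are in this image'))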
729
+ class Segmenting:
730
+ def __init__(self, device):
731
+ print(f"Inintializing Segmentation to {device}")
732
+ self.device = device
733
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
734
+ self.model_checkpoint_path = os.path.join("checkpoints","sam")
735
+
736
+ self.download_parameters()
737
+ self.sam = build_sam(checkpoint=self.model_checkpoint_path).to(device)
738
+ self.sam_predictor = SamPredictor(self.sam)
739
+ self.mask_generator = SamAutomaticMaskGenerator(self.sam)
740
+
741
+ def download_parameters(self):
742
+ url = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
743
+ if not os.path.exists(self.model_checkpoint_path):
744
+ wget.download(url,out=self.model_checkpoint_path)
745
+
746
+ def show_mask(self, mask, ax, random_color=False):
747
+ if random_color:
748
+ color = np.concatenate([np.random.random(3), np.array([1])], axis=0)
749
+ else:
750
+ color = np.array([30/255, 144/255, 255/255, 1])
751
+ h, w = mask.shape[-2:]
752
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
753
+ ax.imshow(mask_image)
754
+
755
+ def show_box(self, box, ax, label):
756
+ x0, y0 = box[0], box[1]
757
+ w, h = box[2] - box[0], box[3] - box[1]
758
+ ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2))
759
+ ax.text(x0, y0, label)
760
+
761
+
762
+ def get_mask_with_boxes(self, image_pil, image, boxes_filt):
763
+
764
+ size = image_pil.size
765
+ H, W = size[1], size[0]
766
+ for i in range(boxes_filt.size(0)):
767
+ boxes_filt[i] = boxes_filt[i] * torch.Tensor([W, H, W, H])
768
+ boxes_filt[i][:2] -= boxes_filt[i][2:] / 2
769
+ boxes_filt[i][2:] += boxes_filt[i][:2]
770
+
771
+ boxes_filt = boxes_filt.cpu()
772
+ transformed_boxes = self.sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(self.device)
773
+
774
+ masks, _, _ = self.sam_predictor.predict_torch(
775
+ point_coords = None,
776
+ point_labels = None,
777
+ boxes = transformed_boxes.to(self.device),
778
+ multimask_output = False,
779
+ )
780
+ return masks
781
+
782
+ def segment_image_with_boxes(self, image_pil, image_path, boxes_filt, pred_phrases):
783
+
784
+ image = cv2.imread(image_path)
785
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
786
+ self.sam_predictor.set_image(image)
787
+
788
+ masks = self.get_mask_with_boxes(image_pil, image, boxes_filt)
789
+
790
+ # draw output image
791
+ plt.figure(figsize=(10, 10))
792
+ plt.imshow(image)
793
+ for mask in masks:
794
+ self.show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
795
+
796
+ updated_image_path = get_new_image_name(image_path, func_name="segmentation")
797
+ plt.axis('off')
798
+ plt.savefig(
799
+ updated_image_path,
800
+ bbox_inches="tight", dpi=300, pad_inches=0.0
801
+ )
802
+ return updated_image_path
803
+
804
+
805
+ @prompts(name="Segment the Image",
806
+ description="useful when you want to segment all the part of the image, but not segment a certain object."
807
+ "like: segment all the object in this image, or generate segmentations on this image, "
808
+ "or segment the image,"
809
+ "or perform segmentation on this image, "
810
+ "or segment all the object in this image."
811
+ "The input to this tool should be a string, representing the image_path")
812
+ def inference_all(self,image_path):
813
+ image = cv2.imread(image_path)
814
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
815
+ masks = self.mask_generator.generate(image)
816
+ plt.figure(figsize=(20,20))
817
+ plt.imshow(image)
818
+ if len(masks) == 0:
819
+ return
820
+ sorted_anns = sorted(masks, key=(lambda x: x['area']), reverse=True)
821
+ ax = plt.gca()
822
+ ax.set_autoscale_on(False)
823
+ polygons = []
824
+ color = []
825
+ for ann in sorted_anns:
826
+ m = ann['segmentation']
827
+ img = np.ones((m.shape[0], m.shape[1], 3))
828
+ color_mask = np.random.random((1, 3)).tolist()[0]
829
+ for i in range(3):
830
+ img[:,:,i] = color_mask[i]
831
+ ax.imshow(np.dstack((img, m)))
832
+
833
+ updated_image_path = get_new_image_name(image_path, func_name="segment-image")
834
+ plt.axis('off')
835
+ plt.savefig(
836
+ updated_image_path,
837
+ bbox_inches="tight", dpi=300, pad_inches=0.0
838
+ )
839
+ return updated_image_path
840
+
841
+
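Segmenting exposes two paths: box-guided mask prediction (used by the template tools further down) and whole-image segmentation through SamAutomaticMaskGenerator. A hedged usage sketch, assuming the checkpoints/ folder already exists (the __main__ block of gpt4tools.py creates it) and using a placeholder image path:

    from gpt4tools import Segmenting
    seg = Segmenting(device='cuda:0')            # downloads sam_vit_h_4b8939.pth on first run
    out_path = seg.inference_all('image/street_1234.png')
    print(out_path)                              # overlay of all predicted masks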
842
+ class Text2Box:
843
+ def __init__(self, device):
844
+ print(f"Initializing ObjectDetection to {device}")
845
+ self.device = device
846
+ self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
847
+ self.model_checkpoint_path = os.path.join("checkpoints","groundingdino")
848
+ self.model_config_path = os.path.join("checkpoints","grounding_config.py")
849
+ self.download_parameters()
850
+ self.box_threshold = 0.3
851
+ self.text_threshold = 0.25
852
+ self.grounding = (self.load_model()).to(self.device)
853
+
854
+ def download_parameters(self):
855
+ url = "https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth"
856
+ if not os.path.exists(self.model_checkpoint_path):
857
+ wget.download(url,out=self.model_checkpoint_path)
858
+ config_url = "https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/main/groundingdino/config/GroundingDINO_SwinT_OGC.py"
859
+ if not os.path.exists(self.model_config_path):
860
+ wget.download(config_url,out=self.model_config_path)
861
+ def load_image(self,image_path):
862
+ # load image
863
+ image_pil = Image.open(image_path).convert("RGB") # load image
864
+
865
+ transform = T.Compose(
866
+ [
867
+ T.RandomResize([512], max_size=1333),
868
+ T.ToTensor(),
869
+ T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
870
+ ]
871
+ )
872
+ image, _ = transform(image_pil, None) # 3, h, w
873
+ return image_pil, image
874
+
875
+ def load_model(self):
876
+ args = SLConfig.fromfile(self.model_config_path)
877
+ args.device = self.device
878
+ model = build_model(args)
879
+ checkpoint = torch.load(self.model_checkpoint_path, map_location="cpu")
880
+ load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
881
+ print(load_res)
882
+ _ = model.eval()
883
+ return model
884
+
885
+ def get_grounding_boxes(self, image, caption, with_logits=True):
886
+ caption = caption.lower()
887
+ caption = caption.strip()
888
+ if not caption.endswith("."):
889
+ caption = caption + "."
890
+ image = image.to(self.device)
891
+ with torch.no_grad():
892
+ outputs = self.grounding(image[None], captions=[caption])
893
+ logits = outputs["pred_logits"].cpu().sigmoid()[0] # (nq, 256)
894
+ boxes = outputs["pred_boxes"].cpu()[0] # (nq, 4)
895
+ logits.shape[0]
896
+
897
+ # filter output
898
+ logits_filt = logits.clone()
899
+ boxes_filt = boxes.clone()
900
+ filt_mask = logits_filt.max(dim=1)[0] > self.box_threshold
901
+ logits_filt = logits_filt[filt_mask] # num_filt, 256
902
+ boxes_filt = boxes_filt[filt_mask] # num_filt, 4
903
+ logits_filt.shape[0]
904
+
905
+ # get phrase
906
+ tokenizer = self.grounding.tokenizer
907
+ tokenized = tokenizer(caption)
908
+ # build pred
909
+ pred_phrases = []
910
+ for logit, box in zip(logits_filt, boxes_filt):
911
+ pred_phrase = get_phrases_from_posmap(logit > self.text_threshold, tokenized, tokenizer)
912
+ if with_logits:
913
+ pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
914
+ else:
915
+ pred_phrases.append(pred_phrase)
916
+
917
+ return boxes_filt, pred_phrases
918
+
919
+ def plot_boxes_to_image(self, image_pil, tgt):
920
+ H, W = tgt["size"]
921
+ boxes = tgt["boxes"]
922
+ labels = tgt["labels"]
923
+ assert len(boxes) == len(labels), "boxes and labels must have same length"
924
+
925
+ draw = ImageDraw.Draw(image_pil)
926
+ mask = Image.new("L", image_pil.size, 0)
927
+ mask_draw = ImageDraw.Draw(mask)
928
+
929
+ # draw boxes and masks
930
+ for box, label in zip(boxes, labels):
931
+ # from 0..1 to 0..W, 0..H
932
+ box = box * torch.Tensor([W, H, W, H])
933
+ # from xywh to xyxy
934
+ box[:2] -= box[2:] / 2
935
+ box[2:] += box[:2]
936
+ # random color
937
+ color = tuple(np.random.randint(0, 255, size=3).tolist())
938
+ # draw
939
+ x0, y0, x1, y1 = box
940
+ x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
941
+
942
+ draw.rectangle([x0, y0, x1, y1], outline=color, width=6)
943
+ # draw.text((x0, y0), str(label), fill=color)
944
+
945
+ font = ImageFont.load_default()
946
+ if hasattr(font, "getbbox"):
947
+ bbox = draw.textbbox((x0, y0), str(label), font)
948
+ else:
949
+ w, h = draw.textsize(str(label), font)
950
+ bbox = (x0, y0, w + x0, y0 + h)
951
+ # bbox = draw.textbbox((x0, y0), str(label))
952
+ draw.rectangle(bbox, fill=color)
953
+ draw.text((x0, y0), str(label), fill="white")
954
+
955
+ mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=2)
956
+
957
+ return image_pil, mask
958
+
959
+ @prompts(name="Detect the Give Object",
960
+ description="useful when you only want to detect or find out given objects in the picture"
961
+ "The input to this tool should be a comma separated string of two, "
962
+ "representing the image_path, the text description of the object to be found")
963
+ def inference(self, inputs):
964
+ image_path, det_prompt = inputs.split(",")
965
+ print(f"image_path={image_path}, text_prompt={det_prompt}")
966
+ image_pil, image = self.load_image(image_path)
967
+
968
+ boxes_filt, pred_phrases = self.get_grounding_boxes(image, det_prompt)
969
+
970
+ size = image_pil.size
971
+ pred_dict = {
972
+ "boxes": boxes_filt,
973
+ "size": [size[1], size[0]], # H,W
974
+ "labels": pred_phrases,}
975
+
976
+ image_with_box = self.plot_boxes_to_image(image_pil, pred_dict)[0]
977
+
978
+ updated_image_path = get_new_image_name(image_path, func_name="detect-something")
979
+ updated_image = image_with_box.resize(size)
980
+ updated_image.save(updated_image_path)
981
+ print(
982
+ f"\nProcessed ObejectDetecting, Input Image: {image_path}, Object to be Detect {det_prompt}, "
983
+ f"Output Image: {updated_image_path}")
984
+ return updated_image_path
985
+
986
+
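A minimal usage sketch of the GroundingDINO-based detector above (paths and query are illustrative; weights and config are fetched into checkpoints/ on first use):

    from gpt4tools import Text2Box
    detector = Text2Box(device='cuda:0')
    # input format: "<image_path>, <text description of the object>"
    boxed_path = detector.inference('image/street_1234.png, a red car')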
987
+ class Inpainting:
988
+ def __init__(self, device):
989
+ self.device = device
990
+ self.revision = 'fp16' if 'cuda' in self.device else None
991
+ self.torch_dtype = torch.float16 if 'cuda' in self.device else torch.float32
992
+
993
+ self.inpaint = StableDiffusionInpaintPipeline.from_pretrained(
994
+ "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype=self.torch_dtype).to(device)
995
+ def __call__(self, prompt, original_image, mask_image):
996
+ update_image = self.inpaint(prompt=prompt, image=original_image.resize((512, 512)),
997
+ mask_image=mask_image.resize((512, 512))).images[0]
998
+ return update_image
999
+
1000
+
1001
+ class ObjectSegmenting:
1002
+ template_model = True # Add this line to show this is a template model.
1003
+ def __init__(self, Text2Box:Text2Box, Segmenting:Segmenting):
1004
+ self.grounding = Text2Box
1005
+ self.sam = Segmenting
1006
+
1007
+ @prompts(name="Segment the given object",
1008
+ description="useful when you only want to segment the certain objects in the picture"
1009
+ "according to the given text"
1010
+ "like: segment the cat,"
1011
+ "or can you segment an obeject for me"
1012
+ "The input to this tool should be a comma separated string of two, "
1013
+ "representing the image_path, the text description of the object to be found")
1014
+ def inference(self, inputs):
1015
+ image_path, det_prompt = inputs.split(",")
1016
+ print(f"image_path={image_path}, text_prompt={det_prompt}")
1017
+ image_pil, image = self.grounding.load_image(image_path)
1018
+ boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, det_prompt)
1019
+ updated_image_path = self.sam.segment_image_with_boxes(image_pil,image_path,boxes_filt,pred_phrases)
1020
+ print(
1021
+ f"\nProcessed ObejectSegmenting, Input Image: {image_path}, Object to be Segment {det_prompt}, "
1022
+ f"Output Image: {updated_image_path}")
1023
+ return updated_image_path
1024
+
1025
+
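Template models such as ObjectSegmenting are not listed in --load; ConversationBot builds them automatically once their constructor dependencies (here Text2Box and Segmenting) have been loaded. Wiring it up by hand, with a placeholder image path, would look roughly like:

    from gpt4tools import Text2Box, Segmenting, ObjectSegmenting
    t2b = Text2Box(device='cuda:0')
    sam = Segmenting(device='cuda:0')
    obj_seg = ObjectSegmenting(t2b, sam)
    print(obj_seg.inference('image/street_1234.png, the dog'))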
1026
+ class ImageEditing:
1027
+ template_model = True
1028
+ def __init__(self, Text2Box:Text2Box, Segmenting:Segmenting, Inpainting:Inpainting):
1029
+ print(f"Initializing ImageEditing")
1030
+ self.sam = Segmenting
1031
+ self.grounding = Text2Box
1032
+ self.inpaint = Inpainting
1033
+
1034
+ def pad_edge(self,mask,padding):
1035
+ #mask Tensor [H,W]
1036
+ mask = mask.numpy()
1037
+ true_indices = np.argwhere(mask)
1038
+ mask_array = np.zeros_like(mask, dtype=bool)
1039
+ for idx in true_indices:
1040
+ padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx)
1041
+ mask_array[padded_slice] = True
1042
+ new_mask = (mask_array * 255).astype(np.uint8)
1043
+ #new_mask
1044
+ return new_mask
1045
+
1046
+ @prompts(name="Remove Something From The Photo",
1047
+ description="useful when you want to remove and object or something from the photo "
1048
+ "from its description or location. "
1049
+ "The input to this tool should be a comma separated string of two, "
1050
+ "representing the image_path and the object need to be removed. ")
1051
+ def inference_remove(self, inputs):
1052
+ image_path, to_be_removed_txt = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
1053
+ return self.inference_replace_sam(f"{image_path},{to_be_removed_txt},background")
1054
+
1055
+ @prompts(name="Replace Something From The Photo",
1056
+ description="useful when you want to replace an object from the object description or "
1057
+ "location with another object from its description. "
1058
+ "The input to this tool should be a comma separated string of three, "
1059
+ "representing the image_path, the object to be replaced, the object to be replaced with ")
1060
+ def inference_replace_sam(self,inputs):
1061
+ image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
1062
+
1063
+ print(f"image_path={image_path}, to_be_replaced_txt={to_be_replaced_txt}")
1064
+ image_pil, image = self.grounding.load_image(image_path)
1065
+ boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, to_be_replaced_txt)
1066
+ image = cv2.imread(image_path)
1067
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
1068
+ self.sam.sam_predictor.set_image(image)
1069
+ masks = self.sam.get_mask_with_boxes(image_pil, image, boxes_filt)
1070
+ mask = torch.sum(masks, dim=0).unsqueeze(0)
1071
+ mask = torch.where(mask > 0, True, False)
1072
+ mask = mask.squeeze(0).squeeze(0).cpu() #tensor
1073
+
1074
+ mask = self.pad_edge(mask,padding=20) #numpy
1075
+ mask_image = Image.fromarray(mask)
1076
+
1077
+ updated_image = self.inpaint(prompt=replace_with_txt, original_image=image_pil,
1078
+ mask_image=mask_image)
1079
+ updated_image_path = get_new_image_name(image_path, func_name="replace-something")
1080
+ updated_image = updated_image.resize(image_pil.size)
1081
+ updated_image.save(updated_image_path)
1082
+ print(
1083
+ f"\nProcessed ImageEditing, Input Image: {image_path}, Replace {to_be_replaced_txt} to {replace_with_txt}, "
1084
+ f"Output Image: {updated_image_path}")
1085
+ return updated_image_path
1086
+
1087
+
1088
+ class ConversationBot:
1089
+ def __init__(self, load_dict, llm_kwargs):
1090
+ # load_dict = {'VisualQuestionAnswering':'cuda:0', 'ImageCaptioning':'cuda:1',...}
1091
+ print(f"Initializing GPT4Tools, load_dict={load_dict}")
1092
+ if 'ImageCaptioning' not in load_dict:
1093
+ raise ValueError("You have to load ImageCaptioning as a basic function for GPT4Tools")
1094
+
1095
+ self.models = {}
1096
+ # Load Basic Foundation Models
1097
+ for class_name, device in load_dict.items():
1098
+ self.models[class_name] = globals()[class_name](device=device)
1099
+
1100
+ # Load Template Foundation Models
1101
+ for class_name, module in globals().items():
1102
+ if getattr(module, 'template_model', False):
1103
+ template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
1104
+ loaded_names = set([type(e).__name__ for e in self.models.values()])
1105
+ if template_required_names.issubset(loaded_names):
1106
+ self.models[class_name] = globals()[class_name](
1107
+ **{name: self.models[name] for name in template_required_names})
1108
+
1109
+ print(f"All the Available Functions: {self.models}")
1110
+
1111
+ self.tools = []
1112
+ for instance in self.models.values():
1113
+ for e in dir(instance):
1114
+ if e.startswith('inference'):
1115
+ func = getattr(instance, e)
1116
+ self.tools.append(Tool(name=func.name, description=func.description, func=func))
1117
+ self.llm = Llama(model_kwargs=llm_kwargs)
1118
+ self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
1119
+
1120
+ def init_agent(self, lang):
1121
+ self.memory.clear() #clear previous history
1122
+ if lang=='English':
1123
+ PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = GPT4TOOLS_PREFIX, GPT4TOOLS_FORMAT_INSTRUCTIONS, GPT4TOOLS_SUFFIX
1124
+ place = "Enter text and press enter, or upload an image"
1125
+ label_clear = "Clear"
1126
+ else:
1127
+ raise NotImplementedError(f'{lang} is not supported yet')
1128
+ self.agent = initialize_agent(
1129
+ self.tools,
1130
+ self.llm,
1131
+ agent="conversational-react-description",
1132
+ verbose=True,
1133
+ memory=self.memory,
1134
+ return_intermediate_steps=True,
1135
+ agent_kwargs={'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS,
1136
+ 'suffix': SUFFIX}, )
1137
+ return gr.update(visible = True), gr.update(visible = False), gr.update(placeholder=place), gr.update(value=label_clear)
1138
+
1139
+ def run_text(self, text, state):
1140
+ self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer)
1141
+ res = self.agent({"input": text.strip()})
1142
+ res['output'] = res['output'].replace("\\", "/")
1143
+ response = re.sub(r'(image/[-\w]*\.png)', lambda m: f'![](file={m.group(0)})*{m.group(0)}*', res['output'])
1144
+ state = state + [(text, response)]
1145
+ print(f"\nProcessed run_text, Input text: {text}\nCurrent state: {state}\n"
1146
+ f"Current Memory: {self.agent.memory.buffer}")
1147
+ return state, state
1148
+
1149
+ def run_image(self, image, state, txt, lang='English'):
1150
+ image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
1151
+ print("======>Auto Resize Image...")
1152
+ img = Image.open(image.name)
1153
+ width, height = img.size
1154
+ ratio = min(512 / width, 512 / height)
1155
+ width_new, height_new = (round(width * ratio), round(height * ratio))
1156
+ width_new = int(np.round(width_new / 64.0)) * 64
1157
+ height_new = int(np.round(height_new / 64.0)) * 64
1158
+ img = img.resize((width_new, height_new))
1159
+ img = img.convert('RGB')
1160
+ img.save(image_filename, "PNG")
1161
+ print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
1162
+ description = self.models['ImageCaptioning'].inference(image_filename)
1163
+ if lang == 'English':
1164
+ Human_prompt = f'\nHuman: Provide an image named {image_filename}. The description is: {description}. Understand the image using tools.\n'
1165
+ AI_prompt = "Received."
1166
+ else:
1167
+ raise NotImplementedError(f'{lang} is not supported yet')
1168
+ self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
1169
+ state = state + [(f"![](file={image_filename})*{image_filename}*", AI_prompt)]
1170
+ print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n"
1171
+ f"Current Memory: {self.agent.memory.buffer}")
1172
+ return state, state, f'{txt} {image_filename} '
1173
+
1174
+
1175
+ if __name__ == '__main__':
1176
+ if not os.path.exists("checkpoints"):
1177
+ os.mkdir("checkpoints")
1178
+ parser = argparse.ArgumentParser()
1179
+ parser.add_argument('--base_model', type=str, required=True, help='folder path to the vicuna with tokenizer')
1180
+ parser.add_argument('--lora_model', type=str, required=True, help='folder path to the lora model')
1181
+ parser.add_argument('--load', type=str, default='ImageCaptioning_cuda:0,Text2Image_cuda:0')
1182
+ parser.add_argument('--llm_device', type=str, default='cpu', help='device to run the llm model')
1183
+ parser.add_argument('--temperature', type=float, default=0.1, help='temperature for the llm model')
1184
+ parser.add_argument('--max_new_tokens', type=int, default=512, help='max number of new tokens to generate')
1185
+ parser.add_argument('--top_p', type=float, default=0.75, help='top_p for the llm model')
1186
+ parser.add_argument('--top_k', type=int, default=40, help='top_k for the llm model')
1187
+ parser.add_argument('--num_beams', type=int, default=1, help='num_beams for the llm model')
1188
+ args = parser.parse_args()
1189
+ load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.split(',')}
1190
+ llm_kwargs = {'base_model': args.base_model,
1191
+ 'lora_model': args.lora_model,
1192
+ 'device': args.llm_device,
1193
+ 'temperature': args.temperature,
1194
+ 'max_new_tokens': args.max_new_tokens,
1195
+ 'top_p': args.top_p,
1196
+ 'top_k': args.top_k,
1197
+ 'num_beams': args.num_beams}
1198
+ bot = ConversationBot(load_dict=load_dict, llm_kwargs=llm_kwargs)
1199
+ with gr.Blocks() as demo:
1200
+ chatbot = gr.Chatbot(elem_id="chatbot", label="🦙 GPT4Tools").style(height=700)
1201
+ state = gr.State([])
1202
+ with gr.Row(visible=True) as input_raws:
1203
+ with gr.Column(scale=0.7):
1204
+ txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(
1205
+ container=False)
1206
+ with gr.Column(scale=0.15, min_width=0):
1207
+ clear = gr.Button("Clear")
1208
+ with gr.Column(scale=0.15, min_width=0):
1209
+ btn = gr.UploadButton(label="🖼️",file_types=["image"])
1210
+
1211
+ # TODO: support more language
1212
+ bot.init_agent('English')
1213
+ txt.submit(bot.run_text, [txt, state], [chatbot, state])
1214
+ txt.submit(lambda: "", None, txt)
1215
+ btn.upload(bot.run_image, [btn, state, txt], [chatbot, state, txt])
1216
+ clear.click(bot.memory.clear)
1217
+ clear.click(lambda: [], None, chatbot)
1218
+ clear.click(lambda: [], None, state)
1219
+ gr.Examples(
1220
+ examples=["Generate an image of a happy vicuna running in the grass",
1221
+ "Tell me a funny story about dog"],
1222
+ inputs=txt
1223
+ )
1224
+ demo.launch(server_name="0.0.0.0", server_port=80)
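For reference, a hedged sketch of driving the bot without the Gradio UI; the model paths are placeholders and the keyword names mirror the argparse options above:

    from gpt4tools import ConversationBot
    llm_kwargs = {'base_model': '/path/to/vicuna-13b',      # placeholder checkpoint paths
                  'lora_model': '/path/to/gpt4tools-lora',
                  'device': 'cpu', 'temperature': 0.1, 'max_new_tokens': 512,
                  'top_p': 0.75, 'top_k': 40, 'num_beams': 1}
    bot = ConversationBot(load_dict={'ImageCaptioning': 'cuda:0', 'Text2Image': 'cuda:0'},
                          llm_kwargs=llm_kwargs)
    bot.init_agent('English')
    state, _ = bot.run_text('Generate an image of a happy vicuna running in the grass', [])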
images/overview.png ADDED
llama.py ADDED
@@ -0,0 +1,175 @@
1
+ """Wrapper around HuggingFace APIs."""
2
+ import torch
3
+ from typing import Any, Dict, List, Mapping, Optional
4
+
5
+ from pydantic import BaseModel, Extra, root_validator
6
+
7
+ from langchain.llms.base import LLM
8
+ from langchain.llms.utils import enforce_stop_tokens
9
+ from langchain.utils import get_from_dict_or_env
10
+
11
+ from peft import PeftModel
12
+ from transformers import LlamaForCausalLM, LlamaTokenizer, GenerationConfig
13
+
14
+ DEFAULT_REPO_ID = "gpt2"
15
+ VALID_TASKS = ("text2text-generation", "text-generation")
16
+
17
+
18
+ class LlamaHuggingFace:
19
+
20
+ def __init__(self,
21
+ base_model,
22
+ lora_model,
23
+ task='text-generation',
24
+ device='cpu',
25
+ max_new_tokens=512,
26
+ temperature=0.1,
27
+ top_p=0.75,
28
+ top_k=40,
29
+ num_beams=1):
30
+ self.task = task
31
+ self.device = device
32
+ self.temperature = temperature
33
+ self.max_new_tokens = max_new_tokens
34
+ self.top_p = top_p
35
+ self.top_k = top_k
36
+ self.num_beams = num_beams
37
+ self.tokenizer = LlamaTokenizer.from_pretrained(
38
+ base_model, use_fast=False)
39
+ model = LlamaForCausalLM.from_pretrained(
40
+ base_model,
41
+ torch_dtype=torch.float16)
42
+ self.model = PeftModel.from_pretrained(
43
+ model,
44
+ lora_model,
45
+ torch_dtype=torch.float16)
46
+ self.model.to(device)
47
+
48
+ self.tokenizer.pad_token_id = 0
49
+ self.model.config.pad_token_id = 0
50
+ self.model.config.bos_token_id = 1
51
+ self.model.config.eos_token_id = 2
52
+
53
+ if device == "cpu":
54
+ self.model.float()
55
+ else:
56
+ self.model.half()
57
+ self.model.eval()
58
+
59
+ @torch.no_grad()
60
+ def __call__(self, inputs, params):
61
+ if inputs.endswith('Thought:'):
62
+ inputs = inputs[:-len('Thought:')]
63
+ inputs = inputs.replace('Observation:\n\nObservation:', 'Observation:')
64
+ inputs = inputs + '### ASSISTANT:\n'
65
+ input_ids = self.tokenizer(inputs, return_tensors="pt").to(self.device).input_ids
66
+
67
+ generation_config = GenerationConfig(
68
+ temperature=self.temperature,
69
+ top_p=self.top_p,
70
+ top_k=self.top_k,
71
+ num_beams=self.num_beams)
72
+
73
+ generate_ids = self.model.generate(
74
+ input_ids=input_ids,
75
+ generation_config=generation_config,
76
+ max_new_tokens=self.max_new_tokens)
77
+ response = self.tokenizer.batch_decode(
78
+ generate_ids,
79
+ skip_special_tokens=True,
80
+ clean_up_tokenization_spaces=False)
81
+
82
+ response = [res.replace('### ASSISTANT:\n', '') for res in response]
83
+ response = [{'generated_text': res} for res in response]
84
+ return response
85
+
86
+
87
+ class Llama(LLM, BaseModel):
88
+ """Wrapper around LLAMA models.
89
+ """
90
+
91
+ client: Any #: :meta private:
92
+ repo_id: str = DEFAULT_REPO_ID
93
+ """Model name to use."""
94
+ task: Optional[str] = "text-generation"
95
+ """Task to call the model with. Should be a task that returns `generated_text`."""
96
+ model_kwargs: Optional[dict] = None
97
+ """Key word arguments to pass to the model."""
98
+
99
+ class Config:
100
+ """Configuration for this pydantic object."""
101
+
102
+ extra = Extra.forbid
103
+
104
+ @root_validator()
105
+ def validate_environment(cls, values: Dict) -> Dict:
106
+ """Validate that api key and python package exists in environment."""
107
+ repo_id = values["repo_id"]
108
+ model_kwargs = values.get("model_kwargs")
109
+ client = LlamaHuggingFace(
110
+ base_model=model_kwargs.get("base_model"),
111
+ lora_model=model_kwargs.get("lora_model"),
112
+ task=values.get("task"),
113
+ device=model_kwargs.get("device"),
114
+ max_new_tokens=model_kwargs.get("max_new_tokens"),
115
+ temperature=model_kwargs.get("temperature"),
116
+ top_p=model_kwargs.get("top_p"),
117
+ top_k=model_kwargs.get("top_k"),
118
+ num_beams=model_kwargs.get("num_beams")
119
+ )
120
+ if client.task not in VALID_TASKS:
121
+ raise ValueError(
122
+ f"Got invalid task {client.task}, "
123
+ f"currently only {VALID_TASKS} are supported"
124
+ )
125
+ values["client"] = client
126
+ return values
127
+
128
+ @property
129
+ def _identifying_params(self) -> Mapping[str, Any]:
130
+ """Get the identifying parameters."""
131
+ _model_kwargs = self.model_kwargs or {}
132
+ return {
133
+ **{"repo_id": self.repo_id, "task": self.task},
134
+ **{"model_kwargs": _model_kwargs},
135
+ }
136
+
137
+ @property
138
+ def _llm_type(self) -> str:
139
+ """Return type of llm."""
140
+ return "huggingface_hub"
141
+
142
+ def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
143
+ """Call out to HuggingFace Hub's inference endpoint.
144
+
145
+ Args:
146
+ prompt: The prompt to pass into the model.
147
+ stop: Optional list of stop words to use when generating.
148
+
149
+ Returns:
150
+ The string generated by the model.
151
+
152
+ Example:
153
+ .. code-block:: python
154
+
155
+ response = hf("Tell me a joke.")
156
+ """
157
+ _model_kwargs = self.model_kwargs or {}
158
+ response = self.client(inputs=prompt, params=_model_kwargs)
159
+ if "error" in response:
160
+ raise ValueError(f"Error raised by inference API: {response['error']}")
161
+ if self.client.task == "text-generation":
162
+ # Text generation return includes the starter text.
163
+ text = response[0]["generated_text"][len(prompt) :]
164
+ elif self.client.task == "text2text-generation":
165
+ text = response[0]["generated_text"]
166
+ else:
167
+ raise ValueError(
168
+ f"Got invalid task {self.client.task}, "
169
+ f"currently only {VALID_TASKS} are supported"
170
+ )
171
+ if stop is not None:
172
+ # This is a bit hacky, but I can't figure out a better way to enforce
173
+ # stop tokens when making calls to huggingface_hub.
174
+ text = enforce_stop_tokens(text, stop)
175
+ return text
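A minimal construction sketch for the wrapper above (paths are placeholders; this mirrors how gpt4tools.py builds its llm):

    from llama import Llama
    llm = Llama(model_kwargs={'base_model': '/path/to/vicuna-13b',
                              'lora_model': '/path/to/gpt4tools-lora',
                              'device': 'cuda:0', 'max_new_tokens': 512,
                              'temperature': 0.1, 'top_p': 0.75,
                              'top_k': 40, 'num_beams': 1})
    print(llm('Tell me a joke.'))    # langchain's LLM.__call__ dispatches to _call above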
lora_finetune.py ADDED
@@ -0,0 +1,283 @@
1
+ import os
2
+ import sys
3
+ from typing import List
4
+
5
+ import fire
6
+ import torch
7
+ import transformers
8
+ from datasets import load_dataset
9
+
10
+ """
11
+ Unused imports:
12
+ import torch.nn as nn
13
+ import bitsandbytes as bnb
14
+ """
15
+
16
+ from peft import (
17
+ LoraConfig,
18
+ get_peft_model,
19
+ get_peft_model_state_dict,
20
+ prepare_model_for_int8_training,
21
+ set_peft_model_state_dict,
22
+ )
23
+ from transformers import LlamaForCausalLM, LlamaTokenizer
24
+
25
+ from utils.prompter import Prompter
26
+
27
+
28
+ def train(
29
+ # model/data params
30
+ base_model: str = "", # the only required argument
31
+ data_path: str = "yahma/alpaca-cleaned",
32
+ output_dir: str = "./lora-alpaca",
33
+ # training hyperparams
34
+ batch_size: int = 128,
35
+ micro_batch_size: int = 4,
36
+ num_epochs: int = 3,
37
+ learning_rate: float = 3e-4,
38
+ cutoff_len: int = 256,
39
+ val_set_size: int = 2000,
40
+ # lora hyperparams
41
+ lora_r: int = 8,
42
+ lora_alpha: int = 16,
43
+ lora_dropout: float = 0.05,
44
+ lora_target_modules: List[str] = [
45
+ "q_proj",
46
+ "v_proj",
47
+ ],
48
+ # llm hyperparams
49
+ train_on_inputs: bool = True, # if False, masks out inputs in loss
50
+ add_eos_token: bool = False,
51
+ group_by_length: bool = False, # faster, but produces an odd training loss curve
52
+ # wandb params
53
+ wandb_project: str = "",
54
+ wandb_run_name: str = "",
55
+ wandb_watch: str = "", # options: false | gradients | all
56
+ wandb_log_model: str = "", # options: false | true
57
+ resume_from_checkpoint: str = None, # either training checkpoint or final adapter
58
+ prompt_template_name: str = "alpaca", # The prompt template to use, will default to alpaca.
59
+ ):
60
+ if int(os.environ.get("LOCAL_RANK", 0)) == 0:
61
+ print(
62
+ f"Training Alpaca-LoRA model with params:\n"
63
+ f"base_model: {base_model}\n"
64
+ f"data_path: {data_path}\n"
65
+ f"output_dir: {output_dir}\n"
66
+ f"batch_size: {batch_size}\n"
67
+ f"micro_batch_size: {micro_batch_size}\n"
68
+ f"num_epochs: {num_epochs}\n"
69
+ f"learning_rate: {learning_rate}\n"
70
+ f"cutoff_len: {cutoff_len}\n"
71
+ f"val_set_size: {val_set_size}\n"
72
+ f"lora_r: {lora_r}\n"
73
+ f"lora_alpha: {lora_alpha}\n"
74
+ f"lora_dropout: {lora_dropout}\n"
75
+ f"lora_target_modules: {lora_target_modules}\n"
76
+ f"train_on_inputs: {train_on_inputs}\n"
77
+ f"add_eos_token: {add_eos_token}\n"
78
+ f"group_by_length: {group_by_length}\n"
79
+ f"wandb_project: {wandb_project}\n"
80
+ f"wandb_run_name: {wandb_run_name}\n"
81
+ f"wandb_watch: {wandb_watch}\n"
82
+ f"wandb_log_model: {wandb_log_model}\n"
83
+ f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
84
+ f"prompt template: {prompt_template_name}\n"
85
+ )
86
+ assert (
87
+ base_model
88
+ ), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
89
+ gradient_accumulation_steps = batch_size // micro_batch_size
90
+
91
+ prompter = Prompter(prompt_template_name)
92
+
93
+ device_map = "auto"
94
+ world_size = int(os.environ.get("WORLD_SIZE", 1))
95
+ ddp = world_size != 1
96
+ if ddp:
97
+ device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
98
+ gradient_accumulation_steps = gradient_accumulation_steps // world_size
99
+
100
+ # Check if parameter passed or if set within environ
101
+ use_wandb = len(wandb_project) > 0 or (
102
+ "WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
103
+ )
104
+ # Only overwrite environ if wandb param passed
105
+ if len(wandb_project) > 0:
106
+ os.environ["WANDB_PROJECT"] = wandb_project
107
+ if len(wandb_watch) > 0:
108
+ os.environ["WANDB_WATCH"] = wandb_watch
109
+ if len(wandb_log_model) > 0:
110
+ os.environ["WANDB_LOG_MODEL"] = wandb_log_model
111
+
112
+ model = LlamaForCausalLM.from_pretrained(
113
+ base_model,
114
+ load_in_8bit=True,
115
+ torch_dtype=torch.float16,
116
+ device_map=device_map,
117
+ )
118
+
119
+ tokenizer = LlamaTokenizer.from_pretrained(base_model)
120
+
121
+ tokenizer.pad_token_id = (
122
+ 0 # unk. we want this to be different from the eos token
123
+ )
124
+ tokenizer.padding_side = "left" # Allow batched inference
125
+
126
+ def tokenize(prompt, add_eos_token=True):
127
+ # there's probably a way to do this with the tokenizer settings
128
+ # but again, gotta move fast
129
+ result = tokenizer(
130
+ prompt,
131
+ truncation=True,
132
+ max_length=cutoff_len,
133
+ padding=False,
134
+ return_tensors=None,
135
+ )
136
+ if (
137
+ result["input_ids"][-1] != tokenizer.eos_token_id
138
+ and len(result["input_ids"]) < cutoff_len
139
+ and add_eos_token
140
+ ):
141
+ result["input_ids"].append(tokenizer.eos_token_id)
142
+ result["attention_mask"].append(1)
143
+
144
+ result["labels"] = result["input_ids"].copy()
145
+
146
+ return result
147
+
148
+ def generate_and_tokenize_prompt(data_point):
149
+ full_prompt = prompter.generate_prompt(
150
+ data_point["instruction"],
151
+ data_point["input"],
152
+ data_point["output"],
153
+ )
154
+ tokenized_full_prompt = tokenize(full_prompt)
155
+ if not train_on_inputs:
156
+ user_prompt = prompter.generate_prompt(
157
+ data_point["instruction"], data_point["input"]
158
+ )
159
+ tokenized_user_prompt = tokenize(
160
+ user_prompt, add_eos_token=add_eos_token
161
+ )
162
+ user_prompt_len = len(tokenized_user_prompt["input_ids"])
163
+
164
+ if add_eos_token:
165
+ user_prompt_len -= 1
166
+
167
+ tokenized_full_prompt["labels"] = [
168
+ -100
169
+ ] * user_prompt_len + tokenized_full_prompt["labels"][
170
+ user_prompt_len:
171
+ ] # could be sped up, probably
172
+ return tokenized_full_prompt
173
+
174
+ model = prepare_model_for_int8_training(model)
175
+
176
+ config = LoraConfig(
177
+ r=lora_r,
178
+ lora_alpha=lora_alpha,
179
+ target_modules=lora_target_modules,
180
+ lora_dropout=lora_dropout,
181
+ bias="none",
182
+ task_type="CAUSAL_LM",
183
+ )
184
+ model = get_peft_model(model, config)
185
+
186
+ if data_path.endswith(".json") or data_path.endswith(".jsonl"):
187
+ data = load_dataset("json", data_files=data_path)
188
+ else:
189
+ data = load_dataset(data_path)
190
+
191
+ if resume_from_checkpoint:
192
+ # Check the available weights and load them
193
+ checkpoint_name = os.path.join(
194
+ resume_from_checkpoint, "pytorch_model.bin"
195
+ ) # Full checkpoint
196
+ if not os.path.exists(checkpoint_name):
197
+ checkpoint_name = os.path.join(
198
+ resume_from_checkpoint, "adapter_model.bin"
199
+ ) # only LoRA model - LoRA config above has to fit
200
+ resume_from_checkpoint = (
201
+ False # So the trainer won't try loading its state
202
+ )
203
+ # The two files above have a different name depending on how they were saved, but are actually the same.
204
+ if os.path.exists(checkpoint_name):
205
+ print(f"Restarting from {checkpoint_name}")
206
+ adapters_weights = torch.load(checkpoint_name)
207
+ set_peft_model_state_dict(model, adapters_weights)
208
+ else:
209
+ print(f"Checkpoint {checkpoint_name} not found")
210
+
211
+ model.print_trainable_parameters() # Be more transparent about the % of trainable params.
212
+
213
+ if val_set_size > 0:
214
+ train_val = data["train"].train_test_split(
215
+ test_size=val_set_size, shuffle=True, seed=42
216
+ )
217
+ train_data = (
218
+ train_val["train"].shuffle().map(generate_and_tokenize_prompt)
219
+ )
220
+ val_data = (
221
+ train_val["test"].shuffle().map(generate_and_tokenize_prompt)
222
+ )
223
+ else:
224
+ train_data = data["train"].shuffle().map(generate_and_tokenize_prompt)
225
+ val_data = None
226
+
227
+ if not ddp and torch.cuda.device_count() > 1:
228
+ # keeps Trainer from trying its own DataParallelism when more than 1 gpu is available
229
+ model.is_parallelizable = True
230
+ model.model_parallel = True
231
+
232
+ trainer = transformers.Trainer(
233
+ model=model,
234
+ train_dataset=train_data,
235
+ eval_dataset=val_data,
236
+ args=transformers.TrainingArguments(
237
+ per_device_train_batch_size=micro_batch_size,
238
+ gradient_accumulation_steps=gradient_accumulation_steps,
239
+ warmup_steps=100,
240
+ num_train_epochs=num_epochs,
241
+ learning_rate=learning_rate,
242
+ fp16=True,
243
+ logging_steps=10,
244
+ optim="adamw_torch",
245
+ evaluation_strategy="steps" if val_set_size > 0 else "no",
246
+ save_strategy="steps",
247
+ eval_steps=200 if val_set_size > 0 else None,
248
+ save_steps=200,
249
+ output_dir=output_dir,
250
+ save_total_limit=3,
251
+ load_best_model_at_end=True if val_set_size > 0 else False,
252
+ ddp_find_unused_parameters=False if ddp else None,
253
+ group_by_length=group_by_length,
254
+ report_to="wandb" if use_wandb else None,
255
+ run_name=wandb_run_name if use_wandb else None,
256
+ ),
257
+ data_collator=transformers.DataCollatorForSeq2Seq(
258
+ tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
259
+ ),
260
+ )
261
+ model.config.use_cache = False
262
+
263
+ old_state_dict = model.state_dict
264
+ model.state_dict = (
265
+ lambda self, *_, **__: get_peft_model_state_dict(
266
+ self, old_state_dict()
267
+ )
268
+ ).__get__(model, type(model))
269
+
270
+ if torch.__version__ >= "2" and sys.platform != "win32":
271
+ model = torch.compile(model)
272
+
273
+ trainer.train(resume_from_checkpoint=resume_from_checkpoint)
274
+
275
+ model.save_pretrained(output_dir)
276
+
277
+ print(
278
+ "\n If there's a warning about missing keys above, please disregard :)"
279
+ )
280
+
281
+
282
+ if __name__ == "__main__":
283
+ fire.Fire(train)
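fire.Fire exposes train() as a command-line entry point, but it can also be called directly. A hedged example with placeholder paths (an 8-bit-capable GPU and bitsandbytes are assumed for load_in_8bit):

    from lora_finetune import train
    train(base_model='huggyllama/llama-7b',              # placeholder base checkpoint
          data_path='data/gpt4tools_instructions.json',  # placeholder instruction file
          output_dir='./lora-gpt4tools',
          prompt_template_name='gpt4tools',              # templates/gpt4tools.json below
          val_set_size=0)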
requirements.txt ADDED
@@ -0,0 +1,41 @@
1
+ langchain==0.0.101
2
+ torch==1.13.1
3
+ torchvision==0.14.1
4
+ wget==3.2
5
+ accelerate
6
+ addict
7
+ albumentations
8
+ basicsr
9
+ controlnet-aux
10
+ diffusers
11
+ einops
12
+ gradio
13
+ imageio
14
+ imageio-ffmpeg
15
+ invisible-watermark
16
+ kornia
17
+ numpy
18
+ omegaconf
19
+ open_clip_torch
20
+ openai
21
+ opencv-python
22
+ prettytable
23
+ safetensors
24
+ streamlit
25
+ test-tube
26
+ timm
27
+ torchmetrics
28
+ webdataset
29
+ yapf
30
+ appdirs
31
+ loralib
32
+ bitsandbytes==0.37.2
33
+ black
34
+ datasets
35
+ fire
36
+ git+https://github.com/huggingface/peft.git@e536616888d51b453ed354a6f1e243fecb02ea08
37
+ transformers>=4.28.0
38
+ sentencepiece
39
+ fire
40
+ git+https://github.com/IDEA-Research/GroundingDINO.git
41
+ git+https://github.com/facebookresearch/segment-anything.git
templates/gpt4tools.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "description": "A short template for gpt4tools.",
3
+ "prompt_input": "{instruction}\n### USER: {input}\n### ASSISTANT:\n",
4
+ "prompt_no_input": "{instruction}\n### ASSISTANT:\n",
5
+ "response_split": "### ASSISTANT:"
6
+ }
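For illustration, this is how utils/prompter.py (below) fills the template; the instruction and input strings are made up:

    from utils.prompter import Prompter
    p = Prompter('gpt4tools')
    prompt = p.generate_prompt(instruction='GPT4Tools can invoke visual tools to answer the user.',
                               input='Detect the dog in image/example.png')
    # prompt == 'GPT4Tools can invoke visual tools to answer the user.\n'
    #           '### USER: Detect the dog in image/example.png\n### ASSISTANT:\n'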
utils/__init__.py ADDED
File without changes
utils/prompter.py ADDED
@@ -0,0 +1,51 @@
1
+ """
2
+ A dedicated helper to manage templates and prompt building.
3
+ """
4
+
5
+ import json
6
+ import os.path as osp
7
+ from typing import Union
8
+
9
+
10
+ class Prompter(object):
11
+ __slots__ = ("template", "_verbose")
12
+
13
+ def __init__(self, template_name: str = "", verbose: bool = False):
14
+ self._verbose = verbose
15
+ if not template_name:
16
+ # Enforce the default here, so the constructor can be called with '' and will not break.
17
+ template_name = "alpaca"
18
+ file_name = osp.join("templates", f"{template_name}.json")
19
+ if not osp.exists(file_name):
20
+ raise ValueError(f"Can't read {file_name}")
21
+ with open(file_name) as fp:
22
+ self.template = json.load(fp)
23
+ if self._verbose:
24
+ print(
25
+ f"Using prompt template {template_name}: {self.template['description']}"
26
+ )
27
+
28
+ def generate_prompt(
29
+ self,
30
+ instruction: str,
31
+ input: Union[None, str] = None,
32
+ label: Union[None, str] = None,
33
+ ) -> str:
34
+ # returns the full prompt from instruction and optional input
35
+ # if a label (=response, =output) is provided, it's also appended.
36
+ if input:
37
+ res = self.template["prompt_input"].format(
38
+ instruction=instruction, input=input
39
+ )
40
+ else:
41
+ res = self.template["prompt_no_input"].format(
42
+ instruction=instruction
43
+ )
44
+ if label:
45
+ res = f"{res}{label}"
46
+ if self._verbose:
47
+ print(res)
48
+ return res
49
+
50
+ def get_response(self, output: str) -> str:
51
+ return output.split(self.template["response_split"])[1].strip()
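A small round-trip sketch with the gpt4tools template above (the model output is a stand-in string, not a real generation):

    from utils.prompter import Prompter
    p = Prompter('gpt4tools')
    prompt = p.generate_prompt('Answer using the tools.', 'How many cats are in image/example.png?')
    model_output = prompt + 'Thought: Do I need to use a tool? Yes ...'   # placeholder generation
    print(p.get_response(model_output))   # everything after the '### ASSISTANT:' split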