zetavg committed: rename project
Files changed:
- LLaMA_LoRA.ipynb +5 -5
- README.md +10 -10
- llama_lora/globals.py +1 -1
- llama_lora/ui/main_page.py +1 -1
LLaMA_LoRA.ipynb
CHANGED

```diff
@@ -27,13 +27,13 @@
     "colab_type": "text"
    },
    "source": [
-    "<a href=\"https://colab.research.google.com/github/zetavg/LLaMA-LoRA/blob/main/LLaMA_LoRA.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+    "<a href=\"https://colab.research.google.com/github/zetavg/LLaMA-LoRA-Tuner/blob/main/LLaMA_LoRA.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
    ]
   },
   {
    "cell_type": "markdown",
    "source": [
-    "# 🦙🎛️ LLaMA-LoRA\n",
+    "# 🦙🎛️ LLaMA-LoRA Tuner\n",
     "\n",
     "TL;DR: **Runtime > Run All** (`⌘/Ctrl+F9`). Takes about 5 minutes to start. You will be prompted to authorize Google Drive access."
    ],
@@ -72,9 +72,9 @@
    "# @title Git/Project { display-mode: \"form\", run: \"auto\" }\n",
    "# @markdown Project settings.\n",
    "\n",
-    "# @markdown The URL of the LLaMA-LoRA project<br> (default: `https://github.com/zetavg/LLaMA-LoRA.git`):\n",
-    "llama_lora_project_url = \"https://github.com/zetavg/LLaMA-LoRA.git\" # @param {type:\"string\"}\n",
-    "# @markdown The branch to use for LLaMA-LoRA project:\n",
+    "# @markdown The URL of the LLaMA-LoRA-Tuner project<br> (default: `https://github.com/zetavg/LLaMA-LoRA-Tuner.git`):\n",
+    "llama_lora_project_url = \"https://github.com/zetavg/LLaMA-LoRA-Tuner.git\" # @param {type:\"string\"}\n",
+    "# @markdown The branch to use for LLaMA-LoRA-Tuner project:\n",
    "llama_lora_project_branch = \"main\" # @param {type:\"string\"}\n",
    "\n",
    "# # @markdown Forces the local directory to be updated by the remote branch:\n",
```
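For context, these two Colab form fields feed the notebook's setup step. Below is a minimal sketch of how such parameters are typically consumed; the actual clone cell is not part of this hunk, so the exact invocation is an assumption:

```python
import subprocess

# Values as entered in the Colab form fields defined above.
llama_lora_project_url = "https://github.com/zetavg/LLaMA-LoRA-Tuner.git"
llama_lora_project_branch = "main"

# Hypothetical setup step: clone the requested branch of the project
# into a local `llama_lora` directory.
subprocess.run(
    ["git", "clone", "-b", llama_lora_project_branch,
     llama_lora_project_url, "llama_lora"],
    check=True,
)
```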
README.md
CHANGED

````diff
@@ -1,6 +1,6 @@
-# 🦙🎛️ LLaMA-LoRA
+# 🦙🎛️ LLaMA-LoRA Tuner
 
-<a href="https://colab.research.google.com/github/zetavg/LLaMA-LoRA/blob/main/LLaMA_LoRA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
+<a href="https://colab.research.google.com/github/zetavg/LLaMA-LoRA-Tuner/blob/main/LLaMA_LoRA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
 
 Making evaluating and fine-tuning LLaMA models with low-rank adaptation (LoRA) easy.
 
@@ -27,7 +27,7 @@ There are various ways to run this app:
 
 ### Run On Google Colab
 
-Open [this Colab Notebook](https://colab.research.google.com/github/zetavg/LLaMA-LoRA/blob/main/LLaMA_LoRA.ipynb) and select **Runtime > Run All** (`⌘/Ctrl+F9`).
+Open [this Colab Notebook](https://colab.research.google.com/github/zetavg/LLaMA-LoRA-Tuner/blob/main/LLaMA_LoRA.ipynb) and select **Runtime > Run All** (`⌘/Ctrl+F9`).
 
 You will be prompted to authorize Google Drive access, as Google Drive will be used to store your data. See the "Config"/"Google Drive" section for settings and more info.
 
@@ -38,7 +38,7 @@ After approximately 5 minutes of running, you will see the public URL in the output
 After following the [installation guide of SkyPilot](https://skypilot.readthedocs.io/en/latest/getting-started/installation.html), create a `.yaml` to define a task for running the app:
 
 ```yaml
-# llama-lora-
+# llama-lora-tuner.yaml
 
 resources:
   accelerators: A10:1 # 1x NVIDIA A10 GPU
@@ -49,13 +49,13 @@ file_mounts:
   # (to store train datasets and trained models)
   # See https://skypilot.readthedocs.io/en/latest/reference/storage.html for details.
   /data:
-    name: llama-lora-
+    name: llama-lora-tuner-data # Make sure this name is unique or you own this bucket. If it does not exist, SkyPilot will try to create a bucket with this name.
     store: gcs # Could be either of [s3, gcs]
     mode: MOUNT
 
-# Clone the LLaMA-LoRA repo and install its dependencies.
+# Clone the LLaMA-LoRA Tuner repo and install its dependencies.
 setup: |
-  git clone https://github.com/zetavg/LLaMA-LoRA.git llama_lora
+  git clone https://github.com/zetavg/LLaMA-LoRA-Tuner.git llama_lora
   cd llama_lora && pip install -r requirements.lock.txt
   cd ..
   echo 'Dependencies installed.'
@@ -69,7 +69,7 @@ run: |
 Then launch a cluster to run the task:
 
 ```
-sky launch -c llama-lora-
+sky launch -c llama-lora-tuner llama-lora-tuner.yaml
 ```
 
 `-c ...` is an optional flag to specify a cluster name. If not specified, SkyPilot will automatically generate one.
@@ -86,8 +86,8 @@ When you are done, run `sky stop <cluster_name>` to stop the cluster. To terminate
 <summary>Prepare environment with conda</summary>
 
 ```bash
-conda create -y python=3.8 -n llama-lora-
-conda activate llama-lora-
+conda create -y python=3.8 -n llama-lora-tuner
+conda activate llama-lora-tuner
 ```
 </details>
 
````
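For reference, here is the updated SkyPilot task file reassembled from the hunks above. This is a sketch containing only the fragments visible in this diff; unchanged sections (notably the body of the `run:` block) are elided:

```yaml
# llama-lora-tuner.yaml

resources:
  accelerators: A10:1 # 1x NVIDIA A10 GPU

file_mounts:
  # (to store train datasets and trained models)
  # See https://skypilot.readthedocs.io/en/latest/reference/storage.html for details.
  /data:
    name: llama-lora-tuner-data # Must be unique, or a bucket you own; created if missing.
    store: gcs # Could be either of [s3, gcs]
    mode: MOUNT

# Clone the LLaMA-LoRA Tuner repo and install its dependencies.
setup: |
  git clone https://github.com/zetavg/LLaMA-LoRA-Tuner.git llama_lora
  cd llama_lora && pip install -r requirements.lock.txt
  cd ..
  echo 'Dependencies installed.'

# run: ...  (elided from this diff)
```

It is launched as shown above with `sky launch -c llama-lora-tuner llama-lora-tuner.yaml` and stopped with `sky stop llama-lora-tuner`.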
llama_lora/globals.py
CHANGED

```diff
@@ -41,7 +41,7 @@ class Global:
     gpu_total_memory = None
 
     # UI related
-    ui_title: str = "LLaMA-LoRA"
+    ui_title: str = "LLaMA-LoRA Tuner"
     ui_emoji: str = "🦙🎛️"
     ui_subtitle: str = "Toolkit for evaluating and fine-tuning LLaMA models with low-rank adaptation (LoRA)."
     ui_show_sys_info: bool = True
```
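These attributes configure the web UI's header, and only `ui_title` changes in this commit. A minimal sketch of how the title and emoji might be composed for display (the composition itself is an assumption; the attribute names and values come from the file):

```python
class Global:
    # UI-related values as of this commit (see llama_lora/globals.py).
    ui_title: str = "LLaMA-LoRA Tuner"
    ui_emoji: str = "🦙🎛️"
    ui_subtitle: str = "Toolkit for evaluating and fine-tuning LLaMA models with low-rank adaptation (LoRA)."

# Hypothetical header composition:
print(f"{Global.ui_emoji} {Global.ui_title}")  # 🦙🎛️ LLaMA-LoRA Tuner
print(Global.ui_subtitle)
```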
llama_lora/ui/main_page.py
CHANGED

```diff
@@ -30,7 +30,7 @@ def main_page():
         tokenizer_ui()
     info = []
     if Global.version:
-        info.append(f"LLaMA-LoRA `{Global.version}`")
+        info.append(f"LLaMA-LoRA Tuner `{Global.version}`")
     info.append(f"Base model: `{Global.base_model}`")
     if Global.ui_show_sys_info:
         info.append(f"Data dir: `{Global.data_dir}`")
```
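To see what the renamed string affects: `info` accumulates footer fragments for the page. Below is a self-contained sketch of this logic with illustrative stand-in values; the `Global` values and the joining step are assumptions, and only the `info.append(...)` logic comes from the diff:

```python
class Global:
    # Illustrative stand-ins for llama_lora.globals.Global.
    version = "x.y.z"            # hypothetical version string
    base_model = "<base_model>"  # hypothetical model name
    ui_show_sys_info = True
    data_dir = "/data"

info = []
if Global.version:
    info.append(f"LLaMA-LoRA Tuner `{Global.version}`")
info.append(f"Base model: `{Global.base_model}`")
if Global.ui_show_sys_info:
    info.append(f"Data dir: `{Global.data_dir}`")

# One plausible way the fragments could be joined for display:
print("  ·  ".join(info))
# LLaMA-LoRA Tuner `x.y.z`  ·  Base model: `<base_model>`  ·  Data dir: `/data`
```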