Tested and should be ready!
parent a2a4c1d117
commit 399c3d124e

README.md (12 changed lines)
@@ -1,7 +1,7 @@
 # Alpaca Lora 4bit
 Made some adjust for the code in peft and gptq for llama, and make it possible for lora finetuning with a 4 bits base model. The same adjustment can be made for 2, 3 and 8 bits.
 <br>
-* Install Manual by s4rduk4r: https://github.com/s4rduk4r/alpaca_lora_4bit_readme/blob/main/README.md
+* Install Manual by s4rduk4r: https://github.com/s4rduk4r/alpaca_lora_4bit_readme/blob/main/README.md (**NOTE:** don't use the install script, use the requirements.txt instead.)
 
 # Update Logs
 * Resolved numerically unstable issue
@@ -28,16 +28,10 @@ peft: https://github.com/huggingface/peft.git<br>
 ~copy files from GPTQ-for-LLaMa into GPTQ-for-LLaMa path and re-compile cuda extension~<br>
 ~copy files from peft/tuners/lora.py to peft path, replace it~<br>
 
-Linux:
+**NOTE:** Install scripts are no longer needed! requirements.txt now pulls from forks with the necessary patches.
 
 ```
-./install.sh
-```
-
-Windows:
-
-```
-./install.bat
+pip install -r requirements.txt
 ```
 
 # Finetune
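
The install scripts are replaced by a single `pip install -r requirements.txt`, so the patched packages land in site-packages instead of a local `./repository` checkout. A minimal post-install sanity check, sketched under the assumption that requirements.txt provides torch, the patched peft fork, and a package exposing `autograd_4bit` (the fork contents themselves are not part of this commit):

```python
# Post-install sanity check (a sketch; assumes requirements.txt installed
# torch, peft, and a package exposing autograd_4bit as referenced below).
import torch
import peft
import peft.tuners.lora
from autograd_4bit import load_llama_model_4bit_low_ram

print("torch", torch.__version__, "| CUDA available:", torch.cuda.is_available())
print("peft loaded from", peft.__file__)
print("4-bit loader:", load_llama_model_4bit_low_ram.__name__)
```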

@@ -18,9 +18,6 @@
 """
 
 import sys
-sys.path.insert(0, './repository/transformers/src')
-sys.path.insert(0, './repository/GPTQ-for-LLaMa')
-sys.path.insert(0, './repository/peft/src')
 
 import peft
 import peft.tuners.lora
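
The three `sys.path.insert` lines removed above are the pattern this commit eliminates across the Python scripts: locally cloned source trees were pushed to the front of the module search path so they shadowed any installed copies. With the forks installed from requirements.txt that shadowing is no longer wanted; a small guard one could drop into a script (a sketch, not part of the commit) to confirm no local checkouts are still being picked up:

```python
import sys

# Fail fast if any ./repository checkout is still on the module search path
# and could shadow the pip-installed forks (heuristic substring check).
stale = [p for p in sys.path if 'repository' in p]
assert not stale, f"local checkouts still on sys.path: {stale}"

import peft
import peft.tuners.lora  # should now resolve from the installed peft fork
```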

@@ -1,8 +1,5 @@
 import os
 import sys
-sys.path.insert(0, './repository/transformers/src')
-sys.path.insert(0, './repository/GPTQ-for-LLaMa')
-sys.path.insert(0, './repository/peft/src')
 import time
 import torch
 from autograd_4bit import load_llama_model_4bit_low_ram
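
`autograd_4bit` used to be importable only because the local GPTQ-for-LLaMa checkout was on `sys.path`; after this change it has to come from whatever requirements.txt installs. A standard-library-only way to see where each module is actually resolved from (module names taken from the imports above):

```python
import importlib.util

# Print where each module would be loaded from; after this commit the origins
# should point at site-packages rather than ./repository/... checkouts.
for name in ("transformers", "peft", "autograd_4bit"):
    spec = importlib.util.find_spec(name)
    print(f"{name:15s} -> {spec.origin if spec else 'NOT FOUND'}")
```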

install.bat (27 deleted lines)
@@ -1,27 +0,0 @@
-REM This is a install script for Alpaca_LoRA_4bit
-
-REM makedir ./repository/ if not exists
-if not exist .\repository mkdir .\repository
-
-REM Clone repos into current repository into ./repository/
-git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git ./repository/GPTQ-for-LLaMa
-git clone https://github.com/huggingface/peft.git ./repository/peft
-git clone https://github.com/huggingface/transformers.git ./repository/transformers
-
-REM replace ./repository/peft/src/peft/tuners/lora.py with ./peft/tuners/lora.py
-copy .\peft\tuners\lora.py .\repository\peft\src\peft\tuners\lora.py /Y
-
-REM replace ./repository/GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu with ./GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu
-copy .\GPTQ-for-LLaMa\quant_cuda.cpp .\repository\GPTQ-for-LLaMa\quant_cuda.cpp /Y
-copy .\GPTQ-for-LLaMa\quant_cuda_kernel.cu .\repository\GPTQ-for-LLaMa\quant_cuda_kernel.cu /Y
-
-REM copy files into ./repository/GPTQ-for-LLaMa/
-copy .\GPTQ-for-LLaMa\autograd_4bit.py .\repository\GPTQ-for-LLaMa\autograd_4bit.py /Y
-copy .\GPTQ-for-LLaMa\gradient_checkpointing.py .\repository\GPTQ-for-LLaMa\gradient_checkpointing.py /Y
-
-REM install quant_cuda
-cd .\repository\GPTQ-for-LLaMa
-python setup_cuda.py install
-
-echo "Install finished"
-@pause

install.sh (31 deleted lines)
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# This is an install script for Alpaca_LoRA_4bit
-
-# makedir ./repository/ if not exists
-if [ ! -d "./repository" ]; then
-    mkdir ./repository
-fi
-
-# Clone repos into current repository into ./repository/
-git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git ./repository/GPTQ-for-LLaMa
-git clone https://github.com/huggingface/peft.git ./repository/peft
-git clone https://github.com/huggingface/transformers.git ./repository/transformers
-
-# Replace ./repository/peft/src/peft/tuners/lora.py with ./peft/tuners/lora.py
-cp ./peft/tuners/lora.py ./repository/peft/src/peft/tuners/lora.py
-
-# Replace ./repository/GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu with ./GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu
-cp ./GPTQ-for-LLaMa/quant_cuda.cpp ./repository/GPTQ-for-LLaMa/quant_cuda.cpp
-cp ./GPTQ-for-LLaMa/quant_cuda_kernel.cu ./repository/GPTQ-for-LLaMa/quant_cuda_kernel.cu
-
-# Copy files into ./repository/GPTQ-for-LLaMa/
-cp ./GPTQ-for-LLaMa/autograd_4bit.py ./repository/GPTQ-for-LLaMa/autograd_4bit.py
-cp ./GPTQ-for-LLaMa/gradient_checkpointing.py ./repository/GPTQ-for-LLaMa/gradient_checkpointing.py
-
-# Install quant_cuda and cd into ./repository/GPTQ-for-LLaMa
-cd ./repository/GPTQ-for-LLaMa
-python setup_cuda.py install
-
-echo "Install finished"
-read -p "Press [Enter] to continue..."
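
Both deleted scripts finish by compiling the `quant_cuda` extension via `python setup_cuda.py install`. That build now has to happen when pip installs the GPTQ-for-LLaMa fork from requirements.txt (the fork itself is not shown in this commit), so a quick check that the compiled kernel really is available:

```python
# Verify that the quant_cuda CUDA extension built by GPTQ-for-LLaMa's
# setup_cuda.py is importable; an ImportError here usually means the
# extension was never compiled for this environment.
try:
    import quant_cuda
    print("quant_cuda loaded from", quant_cuda.__file__)
except ImportError as exc:
    print("quant_cuda missing:", exc)
```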

@@ -1,7 +1,3 @@
-import sys
-sys.path.insert(0, '../repository/transformers/src')
-sys.path.insert(0, '../repository/GPTQ-for-LLaMa')
-sys.path.insert(0, '../repository/peft/src')
 import time
 import torch
 import autograd_4bit
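
One detail in this last hunk: the removed entries used relative `../repository/...` paths, which only resolved when the script was started from its own directory, one more reason the pip-based setup is sturdier. For reference, a hypothetical CWD-independent variant of the removed lines would anchor on `__file__` instead:

```python
import os
import sys

# Hypothetical CWD-independent version of the removed lines: resolve the
# checkout locations relative to this file rather than the working directory.
here = os.path.dirname(os.path.abspath(__file__))
repo = os.path.join(here, '..', 'repository')
for sub in ('transformers/src', 'GPTQ-for-LLaMa', 'peft/src'):
    sys.path.insert(0, os.path.normpath(os.path.join(repo, sub)))
```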