diff --git a/README.md b/README.md
index 7f64534..fb0326d 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
# Alpaca Lora 4bit
Made some adjustments to the code in peft and GPTQ-for-LLaMa to enable LoRA finetuning with a 4-bit base model. The same adjustments can be made for 2, 3 and 8 bits.
-* Install Manual by s4rduk4r: https://github.com/s4rduk4r/alpaca_lora_4bit_readme/blob/main/README.md
+* Install Manual by s4rduk4r: https://github.com/s4rduk4r/alpaca_lora_4bit_readme/blob/main/README.md (**NOTE:** don't use the install script; use requirements.txt instead.)
# Update Logs
* Resolved numerical instability issue
@@ -28,16 +28,10 @@ peft: https://github.com/huggingface/peft.git
~~copy files from GPTQ-for-LLaMa into the GPTQ-for-LLaMa path and re-compile the CUDA extension~~
~~copy peft/tuners/lora.py into the peft path, replacing the existing file~~
-Linux:
+**NOTE:** The install scripts are no longer needed! requirements.txt now pulls from forks that include the necessary patches.
```
-./install.sh
-```
-
-Windows:
-
-```
-./install.bat
+pip install -r requirements.txt
```
# Finetune
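To make the finetuning claim concrete: the core idea is to load the quantized base model and attach trainable LoRA adapters through peft. A minimal sketch, assuming the patched peft fork from requirements.txt; the checkpoint paths are illustrative, not shipped with the repo:

```
from autograd_4bit import load_llama_model_4bit_low_ram
from peft import LoraConfig, get_peft_model

# Illustrative paths: an HF-style config dir and a 4-bit checkpoint.
model, tokenizer = load_llama_model_4bit_low_ram('./llama-13b-4bit/',
                                                 './llama-13b-4bit.pt')

# Standard peft LoRA config; the patched fork lets these adapters
# train on top of the frozen 4-bit quantized linear layers.
lora_config = LoraConfig(
    r=8, lora_alpha=16, lora_dropout=0.05,
    target_modules=['q_proj', 'v_proj'],
    bias='none', task_type='CAUSAL_LM',
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```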
diff --git a/finetune.py b/finetune.py
index 3f8821e..3970cc8 100644
--- a/finetune.py
+++ b/finetune.py
@@ -18,9 +18,6 @@
"""
import sys
-sys.path.insert(0, './repository/transformers/src')
-sys.path.insert(0, './repository/GPTQ-for-LLaMa')
-sys.path.insert(0, './repository/peft/src')
import peft
import peft.tuners.lora
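With the forks installed as regular packages, the sys.path hacks above become unnecessary; plain imports resolve from site-packages. A quick environment check (a minimal sketch; the module names follow this repo's imports and are assumed to be provided by the pinned forks):

```
# Confirm the pip-installed forks resolve without sys.path manipulation.
import importlib.util

for mod in ('transformers', 'peft', 'autograd_4bit'):
    found = importlib.util.find_spec(mod) is not None
    print(f"{mod}: {'ok' if found else 'missing'}")
```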
diff --git a/inference.py b/inference.py
index 0a25272..7b172ae 100644
--- a/inference.py
+++ b/inference.py
@@ -1,8 +1,5 @@
import os
import sys
-sys.path.insert(0, './repository/transformers/src')
-sys.path.insert(0, './repository/GPTQ-for-LLaMa')
-sys.path.insert(0, './repository/peft/src')
import time
import torch
from autograd_4bit import load_llama_model_4bit_low_ram
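For orientation, here is a minimal generation sketch built around the loader imported above (paths and sampling settings are illustrative assumptions, not the repository's defaults):

```
import torch
from autograd_4bit import load_llama_model_4bit_low_ram

# Illustrative paths: an HF-style config dir and a 4-bit checkpoint.
model, tokenizer = load_llama_model_4bit_low_ram('./llama-13b-4bit/',
                                                 './llama-13b-4bit.pt')

# Send inputs to wherever the loader placed the model.
device = next(model.parameters()).device
batch = tokenizer('I think the meaning of life is', return_tensors='pt')
batch = {k: v.to(device) for k, v in batch.items()}

with torch.no_grad():
    out = model.generate(**batch, max_new_tokens=32, do_sample=True,
                         temperature=0.7)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```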
diff --git a/install.bat b/install.bat
deleted file mode 100644
index bd79a55..0000000
--- a/install.bat
+++ /dev/null
@@ -1,27 +0,0 @@
-REM This is a install script for Alpaca_LoRA_4bit
-
-REM makedir ./repository/ if not exists
-if not exist .\repository mkdir .\repository
-
-REM Clone repos into current repository into ./repository/
-git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git ./repository/GPTQ-for-LLaMa
-git clone https://github.com/huggingface/peft.git ./repository/peft
-git clone https://github.com/huggingface/transformers.git ./repository/transformers
-
-REM replace ./repository/peft/src/peft/tuners/lora.py with ./peft/tuners/lora.py
-copy .\peft\tuners\lora.py .\repository\peft\src\peft\tuners\lora.py /Y
-
-REM replace ./repository/GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu with ./GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu
-copy .\GPTQ-for-LLaMa\quant_cuda.cpp .\repository\GPTQ-for-LLaMa\quant_cuda.cpp /Y
-copy .\GPTQ-for-LLaMa\quant_cuda_kernel.cu .\repository\GPTQ-for-LLaMa\quant_cuda_kernel.cu /Y
-
-REM copy files into ./repository/GPTQ-for-LLaMa/
-copy .\GPTQ-for-LLaMa\autograd_4bit.py .\repository\GPTQ-for-LLaMa\autograd_4bit.py /Y
-copy .\GPTQ-for-LLaMa\gradient_checkpointing.py .\repository\GPTQ-for-LLaMa\gradient_checkpointing.py /Y
-
-REM install quant_cuda
-cd .\repository\GPTQ-for-LLaMa
-python setup_cuda.py install
-
-echo "Install finished"
-@pause
diff --git a/install.sh b/install.sh
deleted file mode 100644
index fd5a67c..0000000
--- a/install.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# This is an install script for Alpaca_LoRA_4bit
-
-# makedir ./repository/ if not exists
-if [ ! -d "./repository" ]; then
- mkdir ./repository
-fi
-
-# Clone repos into current repository into ./repository/
-git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git ./repository/GPTQ-for-LLaMa
-git clone https://github.com/huggingface/peft.git ./repository/peft
-git clone https://github.com/huggingface/transformers.git ./repository/transformers
-
-# Replace ./repository/peft/src/peft/tuners/lora.py with ./peft/tuners/lora.py
-cp ./peft/tuners/lora.py ./repository/peft/src/peft/tuners/lora.py
-
-# Replace ./repository/GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu with ./GPTQ-for-LLaMa/quant_cuda.cpp and quant_cuda_kernel.cu
-cp ./GPTQ-for-LLaMa/quant_cuda.cpp ./repository/GPTQ-for-LLaMa/quant_cuda.cpp
-cp ./GPTQ-for-LLaMa/quant_cuda_kernel.cu ./repository/GPTQ-for-LLaMa/quant_cuda_kernel.cu
-
-# Copy files into ./repository/GPTQ-for-LLaMa/
-cp ./GPTQ-for-LLaMa/autograd_4bit.py ./repository/GPTQ-for-LLaMa/autograd_4bit.py
-cp ./GPTQ-for-LLaMa/gradient_checkpointing.py ./repository/GPTQ-for-LLaMa/gradient_checkpointing.py
-
-# Install quant_cuda and cd into ./repository/GPTQ-for-LLaMa
-cd ./repository/GPTQ-for-LLaMa
-python setup_cuda.py install
-
-echo "Install finished"
-read -p "Press [Enter] to continue..."
diff --git a/text-generation-webui/custom_monkey_patch.py b/text-generation-webui/custom_monkey_patch.py
index 83bbddb..139d4f6 100644
--- a/text-generation-webui/custom_monkey_patch.py
+++ b/text-generation-webui/custom_monkey_patch.py
@@ -1,7 +1,3 @@
-import sys
-sys.path.insert(0, '../repository/transformers/src')
-sys.path.insert(0, '../repository/GPTQ-for-LLaMa')
-sys.path.insert(0, '../repository/peft/src')
import time
import torch
import autograd_4bit
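The patch module now finds its dependencies the same way. A hypothetical smoke test, run from the text-generation-webui directory (the load_model_llama name is an assumption based on this repository's monkey-patch module, not part of webui's own API):

```
# Importing the module applies the 4-bit monkey patches as a side effect.
import custom_monkey_patch

# Assumed entry point that loads the patched 4-bit model and tokenizer.
model, tokenizer = custom_monkey_patch.load_model_llama('llama-13b-4bit')
print(type(model).__name__, type(tokenizer).__name__)
```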