From 3b18aa1cc625b308b405b846658ac8a22b327476 Mon Sep 17 00:00:00 2001
From: John Smith
Date: Thu, 20 Apr 2023 09:51:57 +0800
Subject: [PATCH] fix bug and remove bnb

---
 autograd_4bit.py                             | 2 +-
 monkeypatch/peft_tuners_lora_monkey_patch.py | 1 -
 requirements.txt                             | 1 -
 3 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/autograd_4bit.py b/autograd_4bit.py
index 5a47921..708e134 100644
--- a/autograd_4bit.py
+++ b/autograd_4bit.py
@@ -249,7 +249,7 @@ def load_llama_model_4bit_low_ram_and_offload(config_path, model_path, lora_path
     if lora_path is not None:
         from peft import PeftModel
         from monkeypatch.peft_tuners_lora_monkey_patch import Linear4bitLt
-        model = PeftModel.from_pretrained(model, lora_path, device_map={'': 'cpu'}, torch_dtype=torch.float32)
+        model = PeftModel.from_pretrained(model, lora_path, device_map={'': 'cpu'}, torch_dtype=torch.float32, is_trainable=True)
         print(Style.BRIGHT + Fore.GREEN + '{} Lora Applied.'.format(lora_path))
 
     model.seqlen = seqlen
diff --git a/monkeypatch/peft_tuners_lora_monkey_patch.py b/monkeypatch/peft_tuners_lora_monkey_patch.py
index 4e6b677..c89dbe4 100644
--- a/monkeypatch/peft_tuners_lora_monkey_patch.py
+++ b/monkeypatch/peft_tuners_lora_monkey_patch.py
@@ -2,7 +2,6 @@ import math
 import re
 import torch
 import warnings
-import bitsandbytes as bnb
 from peft.tuners import lora
 from peft.tuners.lora import is_bnb_available, Linear, Linear8bitLt, LoraLayer
 
diff --git a/requirements.txt b/requirements.txt
index e536fe5..e7b9ed1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,5 @@
 torch
 accelerate
-bitsandbytes
 datasets
 sentencepiece
 safetensors
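
Why is_trainable=True matters in the autograd_4bit.py hunk: by default, PeftModel.from_pretrained loads an adapter in inference mode, so the LoRA weights come back with requires_grad=False and any further fine-tuning on top of the loaded adapter silently leaves them unchanged. The bitsandbytes removals drop a direct import from the monkey patch and the hard pin in requirements.txt; the peft-side imports (is_bnb_available, Linear8bitLt) are untouched by this patch. Below is a minimal sketch of the patched call, not code from this repository; the base checkpoint and adapter paths are hypothetical placeholders.

    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    # Hypothetical base checkpoint and adapter directory, for illustration only.
    base = AutoModelForCausalLM.from_pretrained("path/to/llama-base")
    model = PeftModel.from_pretrained(
        base,
        "path/to/lora-adapter",
        is_trainable=True,  # keep LoRA weights trainable for continued fine-tuning
    )

    # With is_trainable=True the LoRA parameters report requires_grad=True;
    # with the default (is_trainable=False) this count would be 0.
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"trainable params: {trainable}")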