Compare commits
No commits in common. "2f704b93c961bf202937b10aac9322b092afdce0" and "d6791790ed2e967f968b528e3d7660acc53fa940" have entirely different histories.
2f704b93c9 ... d6791790ed
README.md (58 changed lines)
@@ -5,59 +5,6 @@ Made some adjust for the code in peft and gptq for llama, and make it possible f
 pip install git+https://github.com/johnsmith0031/alpaca_lora_4bit@winglian-setup_pip
 ```
 
-# Model Server
-
-Better inference performance with text_generation_webui, about <b>40% faster</b>
-
-Simple expriment results:<br>
-7b model with groupsize=128 no act-order<br>
-improved from 13 tokens/sec to 20 tokens/sec
-
-<b>Step:</b>
-1. run model server process
-2. run webui process with monkey patch
-
-<b>Example</b>
-
-run_server.sh
-```
-#!/bin/bash
-
-export PYTHONPATH=$PYTHONPATH:./
-
-CONFIG_PATH=
-MODEL_PATH=
-LORA_PATH=
-
-VENV_PATH=
-source $VENV_PATH/bin/activate
-python ./scripts/run_server.py --config_path $CONFIG_PATH --model_path $MODEL_PATH --lora_path $LORA_PATH --groupsize=128 --quant_attn --port 5555 --pub_port 5556
-```
-
-run_webui.sh
-```
-#!/bin/bash
-
-if [ -f "server2.py" ]; then
-    rm server2.py
-fi
-echo "import custom_model_server_monkey_patch" > server2.py
-cat server.py >> server2.py
-
-export PYTHONPATH=$PYTHONPATH:../
-
-VENV_PATH=
-source $VENV_PATH/bin/activate
-python server2.py --chat --listen
-```
-
-<b>Note:</b>
-* quant_attn only support torch 2.0+
-* lora support is only for simple lora with only q_proj and v_proj
-* this patch breaks model selection, lora selection and training feature in webui
-
-# Docker
-
 ## Quick start for running the chat UI
 
 ```
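The removed section above starts run_server.py with both --port 5555 and --pub_port 5556. A minimal pyzmq sketch of the two-socket layout those flags suggest (a request socket for submitting generation jobs, a subscribe socket for streamed output); the payload format here is hypothetical, the real protocol lives in the repo's ModelServer/ModelClient classes:

```
import zmq

ctx = zmq.Context()

req = ctx.socket(zmq.REQ)              # submit a generation request, wait for an ack/result
req.connect("tcp://127.0.0.1:5555")

sub = ctx.socket(zmq.SUB)              # receive streamed output as it is produced
sub.connect("tcp://127.0.0.1:5556")
sub.setsockopt_string(zmq.SUBSCRIBE, "")

req.send_pyobj({"prompt": "Hello", "max_new_tokens": 64})   # hypothetical payload
print(req.recv_pyobj())

while True:
    chunk = sub.recv_pyobj()           # hypothetical: server publishes text chunks
    if chunk is None:                  # hypothetical end-of-stream sentinel
        break
    print(chunk, end="", flush=True)
```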
@@ -96,13 +43,12 @@ It's fast on a 3070 Ti mobile. Uses 5-6 GB of GPU RAM.
 * Removed bitsandbytes from requirements
 * Added pip installable branch based on winglian's PR
 * Added cuda backend quant attention and fused mlp from GPTQ_For_Llama.
-* Added lora patch for GPTQ_For_Llama repo triton backend.<br>
-Usage:
+* Added lora patch for GPTQ_For_Llama triton backend.
 ```
 from monkeypatch.gptq_for_llala_lora_monkey_patch import inject_lora_layers
 inject_lora_layers(model, lora_path, device, dtype)
 ```
-* Added Model server for better inference performance with webui (40% faster than original webui which runs model and gradio in same process)
-
 # Requirements
 gptq-for-llama <br>
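The changelog snippet above grafts LoRA weights onto an already loaded 4-bit model (q_proj/v_proj only, per the removed note). A sketch of the surrounding load flow; the argument names and return values of load_llama_model_4bit_low_ram are assumptions here, only the inject_lora_layers call mirrors the snippet:

```
import torch
from autograd_4bit import load_llama_model_4bit_low_ram
from monkeypatch.gptq_for_llala_lora_monkey_patch import inject_lora_layers

# Assumed signature: config/tokenizer directory, quantized checkpoint, groupsize,
# returning (model, tokenizer). Check autograd_4bit.py for the exact interface.
model, tokenizer = load_llama_model_4bit_low_ram(
    config_path="./llama-7b-4bit/",          # placeholder paths
    model_path="./llama-7b-4bit.safetensors",
    groupsize=128,
)

# Signature taken from the snippet above: inject_lora_layers(model, lora_path, device, dtype)
inject_lora_layers(model, "./alpaca_lora/", device="cuda", dtype=torch.float16)
```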
@@ -1 +0,0 @@
-from .server import ModelClient, ModelServer, _SentinelTokenStoppingCriteria
@@ -1,4 +1,4 @@
-from model_server import ModelServer
+from server import ModelServer
 import argparse
 
 if __name__ == '__main__':
@@ -0,0 +1 @@
+from .server import ModelClient, ModelServer
@@ -1,7 +1,8 @@
+from .. import autograd_4bit
 import time
 import torch
-from autograd_4bit import load_llama_model_4bit_low_ram, Autograd4bitQuantLinear
-from model_attn_mlp_patch import make_quant_attn, make_fused_mlp, inject_lora_layers
+from ..autograd_4bit import load_llama_model_4bit_low_ram, Autograd4bitQuantLinear
+from alpaca_lora_4bit.model_attn_mlp_patch import make_quant_attn, make_fused_mlp, inject_lora_layers
 import zmq
 from transformers import StoppingCriteria, StoppingCriteriaList
 from io import BytesIO
@@ -23,28 +24,6 @@ def clear_torch_cache():
     torch.cuda.empty_cache()
 
 
-# Copied from https://github.com/PygmalionAI/gradio-ui/
-class _SentinelTokenStoppingCriteria(StoppingCriteria):
-
-    def __init__(self, sentinel_token_ids: list, starting_idx: int):
-        StoppingCriteria.__init__(self)
-        self.sentinel_token_ids = sentinel_token_ids
-        self.starting_idx = starting_idx
-
-    def __call__(self, input_ids: torch.LongTensor, _scores: torch.FloatTensor) -> bool:
-        for sample in input_ids:
-            trimmed_sample = sample[self.starting_idx:]
-
-            for i in range(len(self.sentinel_token_ids)):
-                # Can't unfold, output is still too tiny. Skip.
-                if trimmed_sample.shape[-1] < self.sentinel_token_ids[i].shape[-1]:
-                    continue
-                for window in trimmed_sample.unfold(0, self.sentinel_token_ids[i].shape[-1], 1):
-                    if torch.all(torch.eq(self.sentinel_token_ids[i][0], window)):
-                        return True
-        return False
-
-
 # Copy from text-generation-webui/modules/callbacks.py
 class Stream(StoppingCriteria):
     def __init__(self, callback_func=None):
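The class removed above still exists in the webui itself (a later hunk switches the import to modules.callbacks). A minimal usage sketch against the transformers generate() API; the tokenizer, model, prompt, and stop string are placeholder assumptions:

```
from transformers import StoppingCriteriaList
from modules.callbacks import _SentinelTokenStoppingCriteria  # where the patch now imports it from

prompt = "You: Hi\nBot:"  # placeholder prompt
input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(model.device)

# Watch only tokens generated after the prompt for the stop string "\nYou:".
sentinel_ids = [tokenizer('\nYou:', add_special_tokens=False,
                          return_tensors='pt').input_ids.to(model.device)]
stopping = StoppingCriteriaList([
    _SentinelTokenStoppingCriteria(sentinel_token_ids=sentinel_ids,
                                   starting_idx=input_ids.shape[-1])
])
output_ids = model.generate(input_ids, max_new_tokens=200, stopping_criteria=stopping)
```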
@@ -1,4 +1,4 @@
-from model_server import ModelClient
+from server import ModelClient
 from transformers import LlamaTokenizer
 
 def load_model_llama(*args, **kwargs):
@@ -1,6 +1,6 @@
 import modules.text_generation
 from modules.text_generation import *
-from model_server import _SentinelTokenStoppingCriteria
+from modules.callbacks import _SentinelTokenStoppingCriteria
 
 def generate_reply_patched(question, state, eos_token=None, stopping_strings=[]):
     if shared.model_name == 'None' or shared.model is None:
@@ -115,8 +115,34 @@ def generate_reply_patched(question, state, eos_token=None, stopping_strings=[])
     # Stream the reply 1 token at a time.
     # This is based on the trick of using 'stopping_criteria' to create an iterator.
    elif not shared.args.flexgen:
 
-        # Repalced Original with another socket server
+        # def generate_with_callback(callback=None, **kwargs):
+        #     kwargs['stopping_criteria'].append(Stream(callback_func=callback))
+        #     clear_torch_cache()
+        #     with torch.no_grad():
+        #         shared.model.generate(**kwargs)
+
+        # def generate_with_streaming(**kwargs):
+        #     return Iteratorize(generate_with_callback, kwargs, callback=None)
+
+        # if not shared.is_chat():
+        #     yield formatted_outputs(original_question, shared.model_name)
+
+        # with generate_with_streaming(**generate_params) as generator:
+        #     for output in generator:
+        #         if shared.soft_prompt:
+        #             output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
+
+        #         new_tokens = len(output) - len(input_ids[0])
+        #         reply = decode(output[-new_tokens:], state['skip_special_tokens'])
+        #         if not shared.is_chat():
+        #             reply = original_question + apply_extensions('output', reply)
+
+        #         if output[-1] in eos_token_ids:
+        #             break
+
+        #         yield formatted_outputs(reply, shared.model_name)
+
         from queue import Queue
         queue = Queue()
         def callback_func(x, is_end=False):
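The added lines keep the webui's Iteratorize-based streaming only as comments and replace it with a Queue that the model-server client feeds through callback_func. A minimal sketch of that callback-to-iterator pattern on its own, independent of the server (the names mirror the patch, the consumer loop is an assumption):

```
from queue import Queue

queue = Queue()

def callback_func(x, is_end=False):
    # Called by the client for every new chunk of generated text.
    if not is_end:
        queue.put(x)        # a newly decoded chunk of the reply
    else:
        queue.put(None)     # sentinel: generation finished

def stream_reply():
    # Yield chunks as they arrive until the sentinel is seen.
    while True:
        chunk = queue.get()
        if chunk is None:
            break
        yield chunk
```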
@@ -125,6 +151,9 @@ def generate_reply_patched(question, state, eos_token=None, stopping_strings=[])
             else:
                 queue.put(None)
 
+        # remove stopping_criteria
+        generate_params.pop('stopping_criteria')
+
         shared.model.callback_func = callback_func
         shared.model.generate(**generate_params)
         shared.model.start_recieving()