diff --git a/server/__init__.py b/model_server/__init__.py
similarity index 100%
rename from server/__init__.py
rename to model_server/__init__.py
diff --git a/server/server.py b/model_server/server.py
similarity index 100%
rename from server/server.py
rename to model_server/server.py
diff --git a/scripts/run_server.py b/scripts/run_server.py
index 602a631..d3b748c 100644
--- a/scripts/run_server.py
+++ b/scripts/run_server.py
@@ -1,4 +1,4 @@
-from server import ModelServer
+from model_server import ModelServer
 import argparse
 
 if __name__ == '__main__':
diff --git a/text-generation-webui/custom_model_server_monkey_patch.py b/text-generation-webui/custom_model_server_monkey_patch.py
index ea76f8f..840e16f 100644
--- a/text-generation-webui/custom_model_server_monkey_patch.py
+++ b/text-generation-webui/custom_model_server_monkey_patch.py
@@ -1,4 +1,4 @@
-from server import ModelClient
+from model_server import ModelClient
 from transformers import LlamaTokenizer
 
 def load_model_llama(*args, **kwargs):
diff --git a/text-generation-webui/generate_monkey_patch.py b/text-generation-webui/generate_monkey_patch.py
index 3848840..1d5dc85 100644
--- a/text-generation-webui/generate_monkey_patch.py
+++ b/text-generation-webui/generate_monkey_patch.py
@@ -1,6 +1,6 @@
 import modules.text_generation
 from modules.text_generation import *
-from server import _SentinelTokenStoppingCriteria
+from model_server import _SentinelTokenStoppingCriteria
 
 def generate_reply_patched(question, state, eos_token=None, stopping_strings=[]):
     if shared.model_name == 'None' or shared.model is None: