Make ExLlama_HF the new default for GPTQ

oobabooga 2023-07-14 14:03:56 -07:00
parent 32f12b8bbf
commit b284f2407d
2 changed files with 2 additions and 2 deletions


@@ -23,7 +23,7 @@ def infer_loader(model_name):
     if not path_to_model.exists():
         loader = None
     elif Path(f'{shared.args.model_dir}/{model_name}/quantize_config.json').exists() or ('wbits' in model_settings and type(model_settings['wbits']) is int and model_settings['wbits'] > 0):
-        loader = 'AutoGPTQ'
+        loader = 'ExLlama_HF'
     elif len(list(path_to_model.glob('*ggml*.bin'))) > 0:
         loader = 'llama.cpp'
     elif re.match('.*ggml.*\.bin', model_name.lower()):
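For context, a minimal standalone sketch of the detection logic this hunk changes. The signature, the 'models' default directory, and the final Transformers fallback are assumptions for illustration; the real function reads shared.args and has further branches.

    from pathlib import Path

    def infer_loader(model_name, model_dir='models', model_settings=None):
        # Sketch of the detection above: a quantize_config.json file or a
        # positive integer 'wbits' setting marks a GPTQ model, whose default
        # loader this commit switches from AutoGPTQ to ExLlama_HF.
        model_settings = model_settings or {}
        path_to_model = Path(model_dir) / model_name
        if not path_to_model.exists():
            return None
        if (path_to_model / 'quantize_config.json').exists() or (
                type(model_settings.get('wbits')) is int and model_settings['wbits'] > 0):
            return 'ExLlama_HF'
        if list(path_to_model.glob('*ggml*.bin')):
            return 'llama.cpp'
        return 'Transformers'  # assumed fallback, not part of the diff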


@@ -204,7 +204,7 @@ def create_model_menus():
     with gr.Row():
         with gr.Column():
-            shared.gradio['loader'] = gr.Dropdown(label="Model loader", choices=["Transformers", "AutoGPTQ", "GPTQ-for-LLaMa", "ExLlama", "ExLlama_HF", "llama.cpp"], value=None)
+            shared.gradio['loader'] = gr.Dropdown(label="Model loader", choices=["Transformers", "ExLlama_HF", "AutoGPTQ", "llama.cpp", "ExLlama", "GPTQ-for-LLaMa"], value=None)
             with gr.Box():
                 with gr.Row():
                     with gr.Column():
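Note that with value=None the dropdown starts unselected, so this hunk only reorders the displayed choices; the pre-selected loader presumably still comes from infer_loader(). A hypothetical standalone demo, assuming a Gradio 3.x-era API as used by the project at the time:

    import gradio as gr

    # With value=None the dropdown renders with no selection, so reordering
    # the choices changes their prominence in the list, not the default.
    with gr.Blocks() as demo:
        loader = gr.Dropdown(
            label="Model loader",
            choices=["Transformers", "ExLlama_HF", "AutoGPTQ",
                     "llama.cpp", "ExLlama", "GPTQ-for-LLaMa"],
            value=None,
        )

    if __name__ == '__main__':
        demo.launch()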