Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-09-20 08:15:41 +00:00)
Refactor model loading function
parent 96a75b616b
commit 00a12889e9
server.py | 21 ++++++++++-----------
1 changed file with 10 additions and 11 deletions
@@ -36,15 +36,18 @@ def load_model(model_name):
     if not args.cpu and Path(f"torch-dumps/{model_name}.pt").exists():
         print("Loading in .pt format...")
         model = torch.load(Path(f"torch-dumps/{model_name}.pt"))
-    elif model_name.lower().startswith(('gpt-neo', 'opt-', 'galactica')):
-        if any(size in model_name.lower() for size in ('13b', '20b', '30b')):
-            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
-        else:
-            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=dtype)
+    elif model_name.lower().startswith(('gpt-neo', 'opt-', 'galactica')) and any(size in model_name.lower() for size in ('13b', '20b', '30b')):
+        model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
     elif model_name in ['flan-t5', 't5-large']:
-        model = T5ForConditionalGeneration.from_pretrained(Path(f"models/{model_name}"))
+        if args.cpu:
+            model = T5ForConditionalGeneration.from_pretrained(Path(f"models/{model_name}"))
+        else:
+            model = T5ForConditionalGeneration.from_pretrained(Path(f"models/{model_name}")).cuda()
     else:
-        model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=dtype)
+        if args.cpu:
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=dtype)
+        else:
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=dtype).cuda()
 
     # Loading the tokenizer
     if model_name.lower().startswith('gpt4chan') and Path(f"models/gpt-j-6B/").exists():
@@ -54,10 +57,6 @@ def load_model(model_name):
     else:
         tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{model_name}/"))
 
-    # Sending to the GPU
-    if not (args.cpu or any(size in model_name.lower() for size in ('13b', '20b', '30b'))):
-        model = model.cuda()
-
     print(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
     return model, tokenizer
 
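For readability, here is load_model() as it reads after this commit, as a sketch reconstructed from the two hunks above rather than the verbatim file. The args namespace (with its --cpu flag), dtype, the timer t0, and the gpt4chan tokenizer branch all sit outside the changed lines, so their definitions below are assumptions filled in with placeholders.

import argparse
import time
from pathlib import Path

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, T5ForConditionalGeneration

args = argparse.Namespace(cpu=False)  # assumption: normally filled by the script's CLI parser
dtype = torch.float16                 # assumption: set elsewhere in server.py

def load_model(model_name):
    t0 = time.time()  # assumption: the real t0 is set before the excerpt shown in the diff

    if not args.cpu and Path(f"torch-dumps/{model_name}.pt").exists():
        print("Loading in .pt format...")
        model = torch.load(Path(f"torch-dumps/{model_name}.pt"))
    # Large gpt-neo/opt-/galactica checkpoints now load directly in 8-bit with
    # device_map='auto'; smaller ones fall through to the generic branch below.
    elif model_name.lower().startswith(('gpt-neo', 'opt-', 'galactica')) and any(size in model_name.lower() for size in ('13b', '20b', '30b')):
        model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), device_map='auto', load_in_8bit=True)
    elif model_name in ['flan-t5', 't5-large']:
        if args.cpu:
            model = T5ForConditionalGeneration.from_pretrained(Path(f"models/{model_name}"))
        else:
            model = T5ForConditionalGeneration.from_pretrained(Path(f"models/{model_name}")).cuda()
    else:
        if args.cpu:
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=dtype)
        else:
            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{model_name}"), low_cpu_mem_usage=True, torch_dtype=dtype).cuda()

    # Loading the tokenizer
    if model_name.lower().startswith('gpt4chan') and Path(f"models/gpt-j-6B/").exists():
        # Assumption: this branch body falls between the two hunks and is not shown;
        # the condition suggests it reuses the gpt-j-6B tokenizer directory.
        tokenizer = AutoTokenizer.from_pretrained(Path("models/gpt-j-6B/"))
    else:
        tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{model_name}/"))

    # The old trailing "# Sending to the GPU" block is gone; each branch above
    # now decides CPU vs. GPU placement for itself.
    print(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
    return model, tokenizer

The net effect of the refactor is that device placement is decided where each model is constructed, instead of in a separate block after the fact.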
|
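A hypothetical call site, assuming a checkpoint directory named models/opt-13b exists (the name is for illustration only):

# 'opt-13b' matches the 'opt-' prefix and the '13b' size token, so after this
# commit it is routed to the 8-bit, device_map='auto' branch.
model, tokenizer = load_model("opt-13b")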