Conversion seems to work better this way

This commit is contained in:
oobabooga 2023-02-22 00:35:10 -03:00
parent eef6fc3cbf
commit 193fb1660a

View File

@@ -45,9 +45,9 @@ if __name__ == '__main__':
     model_name = path.name
     print(f"Loading {model_name}...")
-    disable_torch_init()
-    model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float16, _fast_init=True)
-    restore_torch_init()
+    #disable_torch_init()
+    model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
+    #restore_torch_init()
     tokenizer = AutoTokenizer.from_pretrained(path)