From f51c5c8109dc76aaf60ef285013705fb161a0485 Mon Sep 17 00:00:00 2001
From: Zach Nussbaum
Date: Sat, 25 Mar 2023 16:48:05 +0000
Subject: [PATCH] fix: cutoff prompt correctly

---
 generate.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/generate.py b/generate.py
index f593510f..6ac0d5fa 100644
--- a/generate.py
+++ b/generate.py
@@ -11,7 +11,7 @@ def generate(tokenizer, prompt, model, config):
     outputs = model.generate(input_ids=input_ids,
                              max_new_tokens=config["max_new_tokens"],
                              temperature=config["temperature"])
 
-    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
 
     return decoded[len(prompt):]
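
For context, a minimal runnable sketch of the patched generate() with its
surrounding code reconstructed from the hunk. The tokenization line, the
model name, and the demo config values are assumptions for illustration,
not part of the patch:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    def generate(tokenizer, prompt, model, config):
        # Assumed from context: the prompt is tokenized somewhere above
        # the lines shown in the hunk.
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids

        outputs = model.generate(input_ids=input_ids,
                                 max_new_tokens=config["max_new_tokens"],
                                 temperature=config["temperature"])

        # Decoder-only models echo the prompt in their output, so decode
        # the full sequence, strip stray whitespace (the fix in this
        # patch), then slice the prompt back off.
        decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

        return decoded[len(prompt):]

    if __name__ == "__main__":
        # Hypothetical model and settings, purely for a quick smoke test.
        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        model = AutoModelForCausalLM.from_pretrained("gpt2")
        config = {"max_new_tokens": 32, "temperature": 0.7}
        print(generate(tokenizer, "The quick brown fox", model, config))

Without the .strip(), trailing whitespace or a newline in the decoded
output could survive into the returned completion; stripping before the
len(prompt) slice is what the subject line's "cutoff prompt correctly"
refers to.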