mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2024-09-19 15:25:53 +00:00
fix: cutoff prompt correctly
This commit is contained in:
parent
b6e3ba07c4
commit
f51c5c8109
@@ -11,7 +11,7 @@ def generate(tokenizer, prompt, model, config):

    outputs = model.generate(input_ids=input_ids, max_new_tokens=config["max_new_tokens"], temperature=config["temperature"])

-   decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
+   decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    return decoded[len(prompt):]

Loading…
Reference in New Issue
Block a user