From 52065ae4cd1a64c22db6867443b33e414677b0e5 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 31 Mar 2023 19:01:34 -0300
Subject: [PATCH] Add repetition_penalty

---
 modules/llamacpp_model.py  | 2 +-
 modules/text_generation.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 1c67afb1..5304ab7f 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -48,7 +48,7 @@ class LlamaCppModel:
         params.repeat_penalty = repetition_penalty
         #params.repeat_last_n = repeat_last_n
 
-        # model.params = params
+        #self.model.params = params
         self.model.add_bos()
         self.model.update_input(context)
 
diff --git a/modules/text_generation.py b/modules/text_generation.py
index f4cc25d4..b7116d9a 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -130,7 +130,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
 
             # RWKV has proper streaming, which is very nice.
             # No need to generate 8 tokens at a time.
-            for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k):
+            for reply in shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty):
                 output = original_question+reply
                 if not (shared.args.chat or shared.args.cai_chat):
                     reply = original_question + apply_extensions(reply, "output")
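
The patch wires a webui-level repetition_penalty keyword through to llama.cpp's repeat_penalty sampling parameter. Below is a minimal sketch of that flow only; SamplingParams, LlamaCppModelSketch and _infer are hypothetical stand-ins for the real llamacpp binding, kept just to make the example self-contained and runnable.

# Minimal sketch of the parameter flow in this patch: the caller passes
# repetition_penalty to generate(), which maps it onto the llama.cpp-style
# "repeat_penalty" field. The binding names here are assumptions, not the
# project's real API.
from dataclasses import dataclass


@dataclass
class SamplingParams:
    temp: float = 1.0
    top_p: float = 1.0
    top_k: int = 50
    repeat_penalty: float = 1.0  # llama.cpp's name for the repetition penalty


class LlamaCppModelSketch:
    def generate(self, context="", token_count=20, temperature=1.0,
                 top_p=1.0, top_k=50, repetition_penalty=1.0):
        # Translate the webui-level keyword into the llama.cpp-level field,
        # mirroring "params.repeat_penalty = repetition_penalty" in the patch.
        params = SamplingParams(temp=temperature, top_p=top_p, top_k=top_k,
                                repeat_penalty=repetition_penalty)
        return self._infer(context, token_count, params)

    def _infer(self, context, token_count, params):
        # Hypothetical stand-in for the real token-generation loop.
        return f"{context} ... ({token_count} tokens, repeat_penalty={params.repeat_penalty})"


# Caller side, mirroring the changed line in modules/text_generation.py:
model = LlamaCppModelSketch()
print(model.generate(context="Hello", token_count=8, repetition_penalty=1.2))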