From 09b0a3aafb1a3b2d86912db0114b84ad3bc6029a Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 31 Mar 2023 14:45:17 -0300
Subject: [PATCH] Add repetition_penalty

---
 modules/RWKV.py            | 2 +-
 modules/text_generation.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/RWKV.py b/modules/RWKV.py
index 8c7ea2b9..10c4c366 100644
--- a/modules/RWKV.py
+++ b/modules/RWKV.py
@@ -34,7 +34,7 @@ class RWKVModel:
         result.pipeline = pipeline
         return result
 
-    def generate(self, context="", token_count=20, temperature=1, top_p=1, top_k=50, alpha_frequency=0.1, alpha_presence=0.1, token_ban=[0], token_stop=[], callback=None):
+    def generate(self, context="", token_count=20, temperature=1, top_p=1, top_k=50, repetition_penalty=None, alpha_frequency=0.1, alpha_presence=0.1, token_ban=[0], token_stop=[], callback=None):
         args = PIPELINE_ARGS(
             temperature = temperature,
             top_p = top_p,
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 8d54961e..b8b2f496 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -119,7 +119,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
     if any((shared.is_RWKV, shared.is_llamacpp)):
         try:
             if shared.args.no_stream:
-                reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k)
+                reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty)
                 if not (shared.args.chat or shared.args.cai_chat):
                     reply = original_question + apply_extensions(reply, "output")
                 yield formatted_outputs(reply, shared.model_name)
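
A minimal usage sketch (not part of the commit; values and the `model` name are assumptions): once the patch is applied, a caller with a loaded RWKV model can forward the UI's repetition_penalty setting through the new keyword. Note that in modules/RWKV.py the argument is accepted with a default of None but, as shown in the hunk, is not forwarded into PIPELINE_ARGS, which still relies on alpha_frequency and alpha_presence.

    # Hypothetical call site; `model` stands in for shared.model after loading.
    reply = model.generate(
        context="Write a short poem about spring.",
        token_count=200,
        temperature=0.7,
        top_p=0.9,
        top_k=50,
        repetition_penalty=1.15,  # new keyword introduced by this patch
    )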