Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-10-01 01:26:03 -04:00)
Add repetition_penalty
This commit is contained in:
parent 2259143fec
commit 52065ae4cd
@@ -48,7 +48,7 @@ class LlamaCppModel:
         params.repeat_penalty = repetition_penalty
         #params.repeat_last_n = repeat_last_n

-        # model.params = params
+        #self.model.params = params
         self.model.add_bos()
         self.model.update_input(context)

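For context, the repeat_penalty being wired through here discourages tokens that already appear in the recent context before sampling. Below is a minimal, illustrative Python sketch of that idea; it is not the actual llama.cpp code, and the function name apply_repeat_penalty is made up for the example.

    import numpy as np

    def apply_repeat_penalty(logits, recent_tokens, penalty=1.1):
        # Illustrative only: repetition penalty in the CTRL style.
        # Tokens already seen get their logit divided by the penalty when positive,
        # or multiplied by it when negative, making repeats less likely.
        logits = np.array(logits, dtype=float)
        for tok in set(recent_tokens):
            if logits[tok] > 0:
                logits[tok] /= penalty
            else:
                logits[tok] *= penalty
        return logits

    # With penalty > 1.0, token 2 (already generated) loses probability mass.
    print(apply_repeat_penalty([1.0, 0.5, 2.0, -0.3], recent_tokens=[2, 3], penalty=1.3))

A penalty of 1.0 disables the effect; values slightly above 1.0 are typical.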
@@ -130,7 +130,7 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi

         # RWKV has proper streaming, which is very nice.
         # No need to generate 8 tokens at a time.
-        for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k):
+        for reply in shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty):
             output = original_question+reply
             if not (shared.args.chat or shared.args.cai_chat):
                 reply = original_question + apply_extensions(reply, "output")

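The surrounding loop is a generator-consumer pattern: the backend yields a progressively longer reply, and generate_reply post-processes each partial result and yields it to the caller so the UI can update incrementally. A minimal standalone sketch of that pattern, assuming a made-up fake_generate stand-in for the real backend call:

    def fake_generate(context, token_count=8, **kwargs):
        # Hypothetical stand-in for the model backend: yields the reply so far,
        # growing by one token per iteration.
        reply = ""
        for i in range(token_count):
            reply += f" tok{i}"
            yield reply

    def generate_reply_sketch(question, max_new_tokens=4):
        # Mirrors the structure of the loop in the diff: prepend the original
        # prompt to each partial reply and hand it back as it arrives.
        for reply in fake_generate(context=question, token_count=max_new_tokens, repetition_penalty=1.1):
            yield question + reply

    for output in generate_reply_sketch("Hello"):
        print(output)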