From fc332164771d23d5d28add4fd9f27ff0734d2ddc Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 25 May 2023 13:55:51 -0300
Subject: [PATCH] Small fix for n_ctx in llama.cpp

---
 server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server.py b/server.py
index 50a30268..72d09232 100644
--- a/server.py
+++ b/server.py
@@ -404,7 +404,7 @@ def create_model_menus():
                         shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
                         shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
                         shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
-                        shared.gradio['n_ctx'] = gr.Slider(0, 8192, label="n_ctx", value=shared.args.n_ctx)
+                        shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=8192, step=1, label="n_ctx", value=shared.args.n_ctx)
 
                     with gr.Column():
                         shared.gradio['no_mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
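
Note on why the explicit keywords matter: in Gradio 3.x, gr.Slider's first
two positional arguments are minimum and maximum, and when step is omitted
Gradio picks one automatically based on the slider's range, so a 0-8192
slider can snap to coarse increments and miss exact context sizes such as
2048. A minimal before/after sketch (the value 2048 here is illustrative,
not taken from the patch):

    import gradio as gr

    # Before: positional (minimum, maximum); step is inferred from the
    # range, so the slider may not be able to land exactly on 2048.
    before = gr.Slider(0, 8192, label="n_ctx", value=2048)

    # After: explicit keywords with step=1 guarantee integer n_ctx
    # values at any point in the range.
    after = gr.Slider(minimum=0, maximum=8192, step=1, label="n_ctx", value=2048)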