Small fix for n_ctx in llama.cpp

oobabooga 2023-05-25 13:55:51 -03:00
parent 35009c32f0
commit fc33216477


@@ -404,7 +404,7 @@ def create_model_menus():
     shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
     shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
     shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
-    shared.gradio['n_ctx'] = gr.Slider(0, 8192, label="n_ctx", value=shared.args.n_ctx)
+    shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=8192, step=1, label="n_ctx", value=shared.args.n_ctx)
 with gr.Column():
     shared.gradio['no_mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
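
For context, a minimal sketch (not part of the commit) of why the keyword form matters, assuming Gradio's gr.Slider API: in the positional call, 0 and 8192 bind to minimum and maximum, but step is left at Gradio's default, which is derived from the slider's range, so the slider may skip over individual integer values; passing step=1 makes every whole-number context size selectable.

# Illustrative only -- not part of the commit. Assumes Gradio's
# gr.Slider(minimum, maximum, ..., step=..., label=..., value=...) API.
import gradio as gr

with gr.Blocks() as demo:
    # Old form: 0 and 8192 bind positionally to minimum and maximum;
    # step falls back to a default derived from the range.
    gr.Slider(0, 8192, label="n_ctx (before)", value=2048)

    # Fixed form: explicit keywords plus step=1 ensure the slider
    # only yields whole-number n_ctx values.
    gr.Slider(minimum=0, maximum=8192, step=1, label="n_ctx (after)", value=2048)

demo.launch()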