diff --git a/modules/html_generator.py b/modules/html_generator.py
index 902ffcc7..278f1632 100644
--- a/modules/html_generator.py
+++ b/modules/html_generator.py
@@ -1,8 +1,8 @@
+import functools
import html
import os
import re
import time
-import functools
from pathlib import Path

import markdown
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 0e86a9f0..a31bbcf5 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -90,7 +90,7 @@ def create_ui():
shared.gradio['quant_type'] = gr.Dropdown(label="quant_type", choices=["nf4", "fp4"], value=shared.args.quant_type)
shared.gradio['hqq_backend'] = gr.Dropdown(label="hqq_backend", choices=["PYTORCH", "PYTORCH_COMPILE", "ATEN"], value=shared.args.hqq_backend)
- shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=256, value=shared.args.n_gpu_layers, info='This must be set to more than 0 for your GPU to be used.')
+ shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=256, value=shared.args.n_gpu_layers, info='Must be set to more than 0 for your GPU to be used.')
shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=shared.settings['truncation_length_max'], step=256, label="n_ctx", value=shared.args.n_ctx, info='Context length. Try lowering this if you run out of memory while loading the model.')
shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='List of proportions to split the model across multiple GPUs. Example: 18,17')
shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, step=1, value=shared.args.n_batch)
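
For context, a minimal standalone sketch of how the reworded `n-gpu-layers` slider info text renders in Gradio. This is illustrative only and not part of the repository; the real component is created inside `create_ui()` in `modules/ui_model_menu.py`, and the `demo` block and hard-coded `value=0` here are assumptions for the sake of a self-contained example.

```python
import gradio as gr

# Minimal sketch (not the project's actual UI): renders the n-gpu-layers
# slider in isolation with the updated info string from the diff above.
with gr.Blocks() as demo:
    n_gpu_layers = gr.Slider(
        label="n-gpu-layers",
        minimum=0,
        maximum=256,
        value=0,  # the real UI uses shared.args.n_gpu_layers here
        info='Must be set to more than 0 for your GPU to be used.',
    )

if __name__ == "__main__":
    demo.launch()
```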