diff --git a/webui.py b/webui.py
index 90dfa1e4..00b0fbd6 100644
--- a/webui.py
+++ b/webui.py
@@ -232,7 +232,7 @@ if __name__ == "__main__":
     os.chdir(script_dir)
 
     # Check if a model has been downloaded yet
-    if len(glob.glob("text-generation-webui/models/*/")) == 0:
+    if len([item for item in glob.glob('text-generation-webui/models/*') if not item.endswith(('.txt', '.yaml'))]) == 0:
         print_big_message("WARNING: You haven't downloaded any model yet.\nOnce the web UI launches, head over to the bottom of the \"Model\" tab and download one.")
 
     # Workaround for llama-cpp-python loading paths in CUDA env vars even if they do not exist