Mirror of https://github.com/nomic-ai/gpt4all.git (synced 2024-10-01 01:06:10 -04:00)
cli: Allow for multiline prompt and visual processing feedback

Signed-off-by: Abraham Kiggundu <abraham@thekiggys.com>

commit 1d6ad35fad, parent b999d07d93
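The visual feedback in this change follows a familiar pattern: a spinner animation runs on a background thread, and a threading.Event tells it to stop once real output starts arriving. The snippet below is a minimal standalone sketch of that pattern, not the gpt4all code itself; slow_task is a hypothetical stand-in for waiting on the first model token.

import threading
import time


def spinner(done: threading.Event):
    # Cycle a simple ASCII animation until the caller sets `done`.
    frames = ["|", "/", "-", "\\"]
    i = 0
    while not done.is_set():
        print("\r " + frames[i % len(frames)], end="", flush=True)
        i += 1
        time.sleep(0.2)
    print("\r  \r", end="", flush=True)  # wipe the last frame


def slow_task():
    # Hypothetical stand-in for waiting on the first response token.
    time.sleep(2)
    return "first token"


done = threading.Event()
worker = threading.Thread(target=spinner, args=(done,))
worker.start()
result = slow_task()
done.set()     # tell the spinner to stop
worker.join()  # let it clear the line before printing real output
print(result)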
@@ -5,6 +5,8 @@ The GPT4All CLI is a self-contained script based on the `gpt4all` and `typer` packages
 REPL to communicate with a language model similar to the chat GUI application, but more basic.
 """
 
+import time
+import threading
 import importlib.metadata
 import io
 import sys
@@ -42,6 +44,7 @@ CLI_START_MESSAGE = f"""
 
 
 Welcome to the GPT4All CLI! Version {VERSION}
+Type '/.<enter>' on its own line to submit a prompt or command
 Type /help for special commands.
 
 """
@@ -136,14 +139,23 @@ def _old_loop(gpt4all_instance):
 
 
 def _new_loop(gpt4all_instance):
+    processing_done = threading.Event()
+
     with gpt4all_instance.chat_session():
         while True:
-            message = input(" ⇢ ")
+            loading_thread = threading.Thread(
+                target=display_processing_animation, args=(processing_done,)
+            )
+
+            message = get_prompt()
 
             # Check if special command and take action
             if message in SPECIAL_COMMANDS:
                 SPECIAL_COMMANDS[message](MESSAGES)
+                processing_done.set()
                 continue
+            else:
+                loading_thread.start()
 
             # if regular message, append to messages
             MESSAGES.append({"role": "user", "content": message})
@@ -165,10 +177,21 @@ def _new_loop(gpt4all_instance):
                 streaming=True,
             )
             response = io.StringIO()
+
+            # Consider processing complete when first token is received
+            first = True
             for token in response_generator:
-                print(token, end='', flush=True)
+                if first:
+                    processing_done.set()
+                    loading_thread.join()
+                    first = False
+
+                print(token, end="", flush=True)
                 response.write(token)
+
+            # reset the 'processing' flag ready for next prompt
+            processing_done.clear()
 
             # record assistant's response to messages
             response_message = {'role': 'assistant', 'content': response.getvalue()}
             response.close()
@@ -183,5 +206,41 @@ def version():
     print(f"gpt4all-cli v{VERSION}")
 
 
+def get_prompt():
+    message = ""
+    print(" ⇢ ")
+    while True:
+        line = input()
+        # Special commands on any line override the entire prompt
+        if line in SPECIAL_COMMANDS:
+            message = line
+            break
+        # /. is a special command that ends and submits the prompt
+        if line.strip() == "/.":
+            break
+        else:
+            message += line + "\n"
+
+    return message
+
+
+def display_processing_animation(processing_done):
+    processing_animation = ["|", "/", "-", "\\"]
+    display_processing_feedback = True
+    processing_done.clear()
+    a = 0
+    while display_processing_feedback:
+        time.sleep(0.2)
+        a = a + 1
+        print(
+            "\r " + processing_animation[a % len(processing_animation)] + "\r",
+            end="",
+        )
+
+        if processing_done.is_set():
+            display_processing_feedback = False
+            print(" 🤖" + " " * 100, end="\n")
+
+
 if __name__ == "__main__":
     app()
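For illustration, with this change a prompt is typed across several lines at the " ⇢ " marker and submitted by entering "/." on a line of its own; a special command such as /help on any line takes over the whole prompt. The prompt text below is only placeholder example input:

 ⇢ 
Summarize the difference between
a thread and a process.
/.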