fixed response formatting when streaming

Richard Guo 2023-05-18 11:34:39 -04:00
parent d1b17e1fb3
commit 8bc9a4ca83
3 changed files with 5 additions and 3 deletions

@@ -155,7 +155,7 @@ class GPT4All():
             print("Model downloaded at: " + download_path)
         return download_path
 
-    def generate(self, prompt: str, streaming: bool = False, **generate_kwargs) -> str:
+    def generate(self, prompt: str, streaming: bool = True, **generate_kwargs) -> str:
         """
         Surfaced method of running generate without accessing model object.
@@ -211,7 +211,7 @@ class GPT4All():
         response = self.model.generate(full_prompt, streaming=streaming, **generate_kwargs)
 
-        if verbose:
+        if verbose and not streaming:
             print(response)
 
         response_dict = {
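
Taken together, the two gpt4all.py hunks make streaming the default and stop `verbose` from echoing the response a second time after its tokens have already been printed to stdout. A minimal sketch of the resulting behavior, with a hard-coded token list standing in for the model (nothing here is the bindings' real internals):

```python
def generate(prompt: str, streaming: bool = True, verbose: bool = True) -> str:
    tokens = ["The", " quick", " brown", " fox"]  # stand-in for model output
    if streaming:
        for token in tokens:
            print(token, end="", flush=True)  # tokens already reach stdout here
    response = "".join(tokens)
    if verbose and not streaming:
        print(response)  # only echo when nothing was streamed above
    return response
```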

@@ -225,6 +225,8 @@ class LLModel:
         # Revert to old stdout
         sys.stdout = old_stdout
+        # Force new line
+        print()
 
         return stream_processor.output
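
This hunk closes a block that redirects sys.stdout through a stream processor while the model generates; streamed tokens carry no trailing newline, so the added print() moves the cursor to a fresh line before returning. A rough sketch of the pattern, with a hypothetical StreamProcessor class standing in for whatever the bindings actually use:

```python
import sys
from io import StringIO

class StreamProcessor:
    """Hypothetical stand-in: forwards writes to the real stdout while capturing them."""
    def __init__(self, target):
        self._target = target
        self._buffer = StringIO()
    def write(self, text):
        self._target.write(text)   # tokens still appear on screen as they stream
        self._buffer.write(text)   # ...and are captured for the return value
    def flush(self):
        self._target.flush()
    @property
    def output(self):
        return self._buffer.getvalue()

old_stdout = sys.stdout
sys.stdout = stream_processor = StreamProcessor(old_stdout)
print("streamed tokens", end="")   # streamed text ends without a newline

# Revert to old stdout
sys.stdout = old_stdout
# Force new line
print()  # without this, the next prompt would start mid-line

result = stream_processor.output
```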

@@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECtORY,
 setup(
     name=package_name,
-    version="0.2.2",
+    version="0.2.3",
     description="Python bindings for GPT4All",
     author="Richard Guo",
     author_email="richard@nomic.ai",
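
The setup.py hunk is just the version bump that ships the fix. After upgrading to the new release, the installed distribution should report the bumped version; a quick check, assuming the package is published under the name gpt4all:

```python
from importlib.metadata import version

print(version("gpt4all"))  # expected to print 0.2.3 after this release
```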