documentation and cleanup

Richard Guo 2023-05-11 11:02:44 -04:00
parent 113d04dce3
commit 3368924229
5 changed files with 46 additions and 26 deletions

View File

@ -1,11 +1,9 @@
# Python GPT4All
This package contains a set of Python bindings that runs the `llmodel` C-API.
This package contains a set of Python bindings around the `llmodel` C-API.
# Local Installation Instructions
TODO: Right now, the instructions in the main README still depend on the Qt6 setup. To set up the Python bindings, we only need `llmodel` to be built, which is much simpler. In the future, however, the installation instructions below should be organized sequentially, assuming the main README's instructions have been followed first.
## Local Build Instructions
1. Setup `llmodel`
@ -23,7 +21,6 @@ Confirm that `libllmodel.*` exists in `gpt4all-backend/llmodel/build`.
```
cd ../../gpt4all-bindings/python
pip3 install -r requirements.txt
pip3 install -e .
```
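If the editable install succeeds, a quick sanity check is to import the package and fetch the remote model index (a minimal sketch, assuming `list_models()` returns a JSON list as its docstring describes):
```python
# Confirm the bindings import and can reach https://gpt4all.io/models/models.json.
import gpt4all

models = gpt4all.GPT4All.list_models()
print(f"{len(models)} models available")
```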

View File

@ -1,6 +1,6 @@
# GPT4All
In this package, we introduce Python bindings built around GPT4All's C/C++ ecosystem.
In this package, we introduce Python bindings built around GPT4All's C/C++ model backends.
## Quickstart
@ -11,7 +11,7 @@ pip install gpt4all
In Python, run the following commands to retrieve a GPT4All model and generate a response
to a prompt.
**Download Note*:*
**Download Note:**
By default, models are stored in `~/.cache/gpt4all/` (you can change this with `model_path`). If the file already exists, model download will be skipped.
```python
@ -19,4 +19,14 @@ import gpt4all
gptj = gpt4all.GPT4All("ggml-gpt4all-j-v1.3-groovy")
messages = [{"role": "user", "content": "Name 3 colors"}]
gptj.chat_completion(messages)
```
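The `model_path` argument mentioned in the download note can also be passed at construction time; a minimal sketch, assuming the `GPT4All` constructor forwards `model_path` to `retrieve_model` (the directory shown is illustrative):
```python
import gpt4all

# Store (or look up) the model file under /data/models/ instead of ~/.cache/gpt4all/.
gptj = gpt4all.GPT4All("ggml-gpt4all-j-v1.3-groovy", model_path="/data/models/")
```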
## Give it a try!
[Google Colab Tutorial](https://colab.research.google.com/drive/1QRFHV5lj1Kb7_tGZZGZ-E6BfX6izpeMI?usp=sharing)
## Best Practices
GPT4All models are designed to run locally on your own CPU. Large prompts may require longer computation time and
result in worse performance. Giving an instruction to the model will typically produce the best results.
There are two methods to interface with the underlying language model, `chat_completion()` and `generate()`. Chat completion formats a user-provided message dictionary into a prompt template (see API documentation for more details and options). This will usually produce much better results and is the approach we recommend. You may also prompt the model with `generate()` which will just pass the raw input string to the model.
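A short sketch contrasting the two interfaces (prompt strings are illustrative):
```python
import gpt4all

model = gpt4all.GPT4All("ggml-gpt4all-j-v1.3-groovy")

# Recommended: chat_completion() formats the message dicts into a prompt template.
response = model.chat_completion([{"role": "user", "content": "Name 3 colors"}])

# Lower level: generate() passes the raw string to the model unmodified.
raw = model.generate("Name 3 colors: ")
```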

View File

@ -15,7 +15,7 @@ from . import pyllmodel
DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\")
class GPT4All():
"""Python API for retrieving and interacting with GPT4All models
"""Python API for retrieving and interacting with GPT4All models.
Attributes:
model: Pointer to underlying C model.
@ -50,7 +50,7 @@ class GPT4All():
@staticmethod
def list_models():
"""
Fetch model list from https://gpt4all.io/models/models.json
Fetch model list from https://gpt4all.io/models/models.json.
Returns:
Model list in JSON format.
@ -60,7 +60,7 @@ class GPT4All():
return model_json
@staticmethod
def retrieve_model(model_name: str, model_path: str = None, allow_download = True):
def retrieve_model(model_name: str, model_path: str = None, allow_download: bool = True) -> str:
"""
Find model file, and if it doesn't exist, download the model.
@ -113,8 +113,18 @@ class GPT4All():
raise ValueError("Invalid model directory")
@staticmethod
def download_model(model_filename, model_path):
# TODO: Find good way of safely removing file that got interrupted.
def download_model(model_filename: str, model_path: str) -> str:
"""
Download model from https://gpt4all.io.
Args:
model_filename: Filename of model (with .bin extension).
model_path: Path to download model to.
Returns:
Model file destination.
"""
def get_download_url(model_filename):
return f"https://gpt4all.io/models/{model_filename}"
@ -122,6 +132,7 @@ class GPT4All():
download_path = os.path.join(model_path, model_filename).replace("\\", "\\\\")
download_url = get_download_url(model_filename)
# TODO: Find good way of safely removing file that got interrupted.
response = requests.get(download_url, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 1048576 # 1 MB
@ -141,9 +152,16 @@ class GPT4All():
print("Model downloaded at: " + download_path)
return download_path
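For reference, a hedged sketch of driving model retrieval directly through the static helpers documented above (the filename convention and directory are assumptions, shown for illustration only):
```python
from gpt4all import GPT4All

# Resolve a model file, downloading it from https://gpt4all.io if it is missing.
destination = GPT4All.retrieve_model("ggml-gpt4all-j-v1.3-groovy.bin", model_path="/tmp/models")
print("Model file at:", destination)
```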
def generate(self, prompt: str, **generate_kwargs):
def generate(self, prompt: str, **generate_kwargs) -> str:
"""
Surfaced method of running generate without accessing the model object.
Args:
prompt: Raw string to be passed to model.
**generate_kwargs: Optional kwargs to pass to prompt context.
Returns:
Raw string of generated model response.
"""
return self.model.generate(prompt, **generate_kwargs)
@ -158,13 +176,13 @@ class GPT4All():
generated content.
Args:
messages: Each dictionary should have a "role" key
messages: List of dictionaries. Each dictionary should have a "role" key
with value of "system", "assistant", or "user" and a "content" key with a
string value. Messages are organized such that "system" messages are at the top of the prompt,
and "user" and "assistant" messages are displayed in order. Assistant messages get formatted as
"Response: {content}".
default_prompt_header: If True (default), add default prompt header after any user specified system messages and
before user/assistant messages.
default_prompt_header: If True (default), add default prompt header after any system role messages and
before user/assistant role messages.
default_prompt_footer: If True (default), add default footer at end of prompt.
verbose: If True (default), print full prompt and generated response.
@ -175,7 +193,6 @@ class GPT4All():
generated tokens in response, and total tokens.
"choices": List of message dictionary where "content" is generated response and "role" is set
as "assistant". Right now, only one choice is returned by model.
"""
full_prompt = self._build_prompt(messages,
@ -210,6 +227,7 @@ class GPT4All():
def _build_prompt(messages: List[Dict],
default_prompt_header=True,
default_prompt_footer=False) -> str:
# Helper method to format messages into prompt.
full_prompt = ""
for message in messages:
@ -238,7 +256,7 @@ class GPT4All():
@staticmethod
def get_model_from_type(model_type: str) -> pyllmodel.LLModel:
# This needs to be updated for each new model
# This needs to be updated for each new model type
# TODO: Might be worth converting model_type to enum
if model_type == "gptj":
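The TODO above suggests converting `model_type` to an enum; a minimal sketch of that idea (only the value visible in this diff is included, since any other member would be hypothetical):
```python
from enum import Enum

class ModelType(Enum):
    # "gptj" is the only model type visible in this diff.
    GPTJ = "gptj"
```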

View File

@ -68,9 +68,4 @@ extra:
generator: false
analytics:
provider: google
property: G-NPXC8BYHJV
#social:
# - icon: fontawesome/brands/twitter
# link: https://twitter.com/nomic_ai
# - icon: material/fruit-pineapple
# link: https://www.youtube.com/watch?v=628eVJgHD6I
property: G-NPXC8BYHJV

View File

@ -61,7 +61,7 @@ copy_prebuilt_C_lib(SRC_CLIB_DIRECtORY,
setup(
name=package_name,
version="0.1.9",
version="0.2.0",
description="Python bindings for GPT4All",
author="Richard Guo",
author_email="richard@nomic.ai",