This commit is contained in:
mudler 2023-06-01 16:09:06 +02:00 committed by AT
parent 5220356273
commit 243c762411
4 changed files with 21 additions and 23 deletions

View File

@ -46,6 +46,7 @@ To use the bindings in your own software:
- Import `github.com/nomic-ai/gpt4all/gpt4all-bindings/golang`;
- Compile `libgpt4all.a` (you can use `make libgpt4all.a` in the bindings/go directory);
- Link your go binary against gpt4all by setting the environment variables `C_INCLUDE_PATH` and `LIBRARY_PATH` to point to the `binding.h` file directory and `libgpt4all.a` file directory respectively.
- Note: you need to have the *.so/*.dylib/*.dll files of the implementation near the binary produced by the binding in order for this to work
## Testing

View File

@ -16,30 +16,27 @@
#include <iostream>
#include <unistd.h>
void* load_gpt4all_model(const char *fname, int n_threads) {
void* load_model(const char *fname, int n_threads) {
// load the model
llmodel_error new_error{};
fprintf(stderr, "%s: fname '%s'\n",
__func__, fname);
auto gptj4all = llmodel_model_create2(fname, "auto", &new_error);
if (gptj4all == NULL ){
auto model = llmodel_model_create2(fname, "auto", &new_error);
if (model == nullptr ){
fprintf(stderr, "%s: error '%s'\n",
__func__, new_error.message);
return nullptr;
}
llmodel_setThreadCount(gptj4all, n_threads);
if (!llmodel_loadModel(gptj4all, fname)) {
llmodel_setThreadCount(model, n_threads);
if (!llmodel_loadModel(model, fname)) {
return nullptr;
}
return gptj4all;
return model;
}
std::string res = "";
void * mm;
void gpt4all_model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
float top_p, float temp, int n_batch,float ctx_erase)
{
llmodel_model* model = (llmodel_model*) m;
@ -101,7 +98,7 @@ void gpt4all_model_prompt( const char *prompt, void *m, char* result, int repeat
free(prompt_context);
}
void gpt4all_free_model(void *state_ptr) {
void free_model(void *state_ptr) {
llmodel_model* ctx = (llmodel_model*) state_ptr;
llmodel_model_destroy(*ctx);
}

View File

@ -4,12 +4,12 @@ extern "C" {
#include <stdbool.h>
void* load_gpt4all_model(const char *fname, int n_threads);
void* load_model(const char *fname, int n_threads);
void gpt4all_model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
float top_p, float temp, int n_batch,float ctx_erase);
void gpt4all_free_model(void *state_ptr);
void free_model(void *state_ptr);
extern unsigned char getTokenCallback(void *, char *);

View File

@ -5,10 +5,10 @@ package gpt4all
// #cgo darwin LDFLAGS: -framework Accelerate
// #cgo darwin CXXFLAGS: -std=c++17
// #cgo LDFLAGS: -lgpt4all -lm -lstdc++
// void* load_gpt4all_model(const char *fname, int n_threads);
// void gpt4all_model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
// void* load_model(const char *fname, int n_threads);
// void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
// float top_p, float temp, int n_batch,float ctx_erase);
// void gpt4all_free_model(void *state_ptr);
// void free_model(void *state_ptr);
// extern unsigned char getTokenCallback(void *, char *);
import "C"
import (
@ -27,7 +27,7 @@ type Model struct {
func New(model string, opts ...ModelOption) (*Model, error) {
ops := NewModelOptions(opts...)
state := C.load_gpt4all_model(C.CString(model), C.int(ops.Threads))
state := C.load_model(C.CString(model), C.int(ops.Threads))
if state == nil {
return nil, fmt.Errorf("failed loading model")
@ -52,7 +52,7 @@ func (l *Model) Predict(text string, opts ...PredictOption) (string, error) {
}
out := make([]byte, po.Tokens)
C.gpt4all_model_prompt(input, l.state, (*C.char)(unsafe.Pointer(&out[0])), C.int(po.RepeatLastN), C.float(po.RepeatPenalty), C.int(po.ContextSize),
C.model_prompt(input, l.state, (*C.char)(unsafe.Pointer(&out[0])), C.int(po.RepeatLastN), C.float(po.RepeatPenalty), C.int(po.ContextSize),
C.int(po.Tokens), C.int(po.TopK), C.float(po.TopP), C.float(po.Temperature), C.int(po.Batch), C.float(po.ContextErase))
res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
@ -65,7 +65,7 @@ func (l *Model) Predict(text string, opts ...PredictOption) (string, error) {
}
func (l *Model) Free() {
C.gpt4all_free_model(l.state)
C.free_model(l.state)
}
func (l *Model) SetTokenCallback(callback func(token string) bool) {