// Code mirrored from https://github.com/nomic-ai/gpt4all.git
// (synced 2024-10-01, commit 1b87aa2dbc: bindings updated to match the new API).
package gpt4all
// PredictOptions holds the tunable parameters for a single text
// generation (prediction) call.
type PredictOptions struct {
	// Integer-valued knobs.
	ContextSize int // model context window size, in tokens
	RepeatLastN int // number of recent tokens considered by the repeat penalty
	Tokens      int // maximum number of tokens to generate
	TopK        int // top-K sampling cutoff
	Batch       int // prompt batch size
	Special     int // 1 to process special tokens in the prompt, 0 otherwise

	// Float-valued knobs.
	TopP          float64 // nucleus (top-P) sampling threshold
	MinP          float64 // min-P sampling threshold
	Temperature   float64 // sampling temperature
	ContextErase  float64 // fraction of the context erased when the window fills
	RepeatPenalty float64 // penalty applied to repeated tokens
}

// PredictOption mutates a PredictOptions value; options are applied in
// order by NewPredictOptions to build a final configuration.
type PredictOption func(p *PredictOptions)

var DefaultOptions PredictOptions = PredictOptions{
|
|
Tokens: 200,
|
|
TopK: 10,
|
|
TopP: 0.90,
|
|
MinP: 0.0,
|
|
Temperature: 0.96,
|
|
Batch: 1,
|
|
Special: 0,
|
|
ContextErase: 0.55,
|
|
ContextSize: 1024,
|
|
RepeatLastN: 10,
|
|
RepeatPenalty: 1.2,
|
|
}
|
|
|
|
var DefaultModelOptions ModelOptions = ModelOptions{
|
|
Threads: 4,
|
|
}
|
|
|
|
// ModelOptions holds the parameters used when loading a model.
type ModelOptions struct {
	Threads           int    // number of CPU threads used for generation
	LibrarySearchPath string // path searched for the ggml implementation libraries
}

// ModelOption mutates a ModelOptions value; options are applied in
// order by NewModelOptions to build a final configuration.
type ModelOption func(p *ModelOptions)

// SetTokens sets the number of tokens to generate.
|
|
func SetTokens(tokens int) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.Tokens = tokens
|
|
}
|
|
}
|
|
|
|
// SetTopK sets the value for top-K sampling.
|
|
func SetTopK(topk int) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.TopK = topk
|
|
}
|
|
}
|
|
|
|
// SetTopP sets the value for nucleus sampling.
|
|
func SetTopP(topp float64) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.TopP = topp
|
|
}
|
|
}
|
|
|
|
// SetMinP sets the value for min p sampling
|
|
func SetMinP(minp float64) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.MinP = minp
|
|
}
|
|
}
|
|
|
|
// SetRepeatPenalty sets the repeat penalty.
|
|
func SetRepeatPenalty(ce float64) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.RepeatPenalty = ce
|
|
}
|
|
}
|
|
|
|
// SetRepeatLastN sets the RepeatLastN.
|
|
func SetRepeatLastN(ce int) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.RepeatLastN = ce
|
|
}
|
|
}
|
|
|
|
// SetContextErase sets the context erase %.
|
|
func SetContextErase(ce float64) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.ContextErase = ce
|
|
}
|
|
}
|
|
|
|
// SetTemperature sets the temperature value for text generation.
|
|
func SetTemperature(temp float64) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.Temperature = temp
|
|
}
|
|
}
|
|
|
|
// SetBatch sets the batch size.
|
|
func SetBatch(size int) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
p.Batch = size
|
|
}
|
|
}
|
|
|
|
// SetSpecial is true if special tokens in the prompt should be processed, false otherwise.
|
|
func SetSpecial(special bool) PredictOption {
|
|
return func(p *PredictOptions) {
|
|
if special {
|
|
p.Special = 1
|
|
} else {
|
|
p.Special = 0
|
|
}
|
|
}
|
|
}
|
|
|
|
// Create a new PredictOptions object with the given options.
|
|
func NewPredictOptions(opts ...PredictOption) PredictOptions {
|
|
p := DefaultOptions
|
|
for _, opt := range opts {
|
|
opt(&p)
|
|
}
|
|
return p
|
|
}
|
|
|
|
// SetThreads sets the number of threads to use for text generation.
|
|
func SetThreads(c int) ModelOption {
|
|
return func(p *ModelOptions) {
|
|
p.Threads = c
|
|
}
|
|
}
|
|
|
|
// SetLibrarySearchPath sets the dynamic libraries used by gpt4all for the various ggml implementations.
|
|
func SetLibrarySearchPath(t string) ModelOption {
|
|
return func(p *ModelOptions) {
|
|
p.LibrarySearchPath = t
|
|
}
|
|
}
|
|
|
|
// Create a new PredictOptions object with the given options.
|
|
func NewModelOptions(opts ...ModelOption) ModelOptions {
|
|
p := DefaultModelOptions
|
|
for _, opt := range opts {
|
|
opt(&p)
|
|
}
|
|
return p
|
|
}
|