#ifndef LLMODEL_H
#define LLMODEL_H

#include <string>
#include <functional>
#include <vector>
#include <cstdint>   // int32_t
#include <istream>   // std::istream

class LLModel {
public:
    explicit LLModel() {}
    virtual ~LLModel() {}

    // Load the model weights either from a file at modelPath or from an
    // already opened input stream.
    virtual bool loadModel(const std::string &modelPath) = 0;
    virtual bool loadModel(const std::string &modelPath, std::istream &fin) = 0;
    virtual bool isModelLoaded() const = 0;
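
    // A minimal loading sketch (the concrete subclass `MyModel` and the file
    // name are hypothetical stand-ins; any implementation of this interface
    // is used the same way):
    //
    //     MyModel model;
    //     if (!model.loadModel("ggml-model.bin"))
    //         /* handle the failure */;
    //     assert(model.isModelLoaded());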

    struct PromptContext {
        std::vector<float> logits;    // logits of current context
        std::vector<int32_t> tokens;  // current tokens in the context window
        int32_t n_past = 0;           // number of tokens in past conversation
        int32_t n_ctx = 0;            // number of tokens possible in context window
        int32_t n_predict = 200;      // maximum number of tokens to predict
        int32_t top_k = 40;           // top-k sampling: sample from the k most likely tokens
        float top_p = 0.9f;           // nucleus sampling: sample within this cumulative probability
        float temp = 0.9f;            // sampling temperature
        int32_t n_batch = 9;          // number of prompt tokens evaluated per batch
        float repeat_penalty = 1.10f; // penalty applied to recently repeated tokens
        int32_t repeat_last_n = 64;   // last n tokens to penalize
        float contextErase = 0.75f;   // percent of context to erase if we exceed the context window
    };
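
    // Worked example of the contextErase arithmetic (a sketch of the intent,
    // not necessarily the exact formula an implementation uses): with
    // n_ctx = 2048 and contextErase = 0.75f, overflowing the window erases
    // about int32_t(2048 * 0.75f) == 1536 of the oldest tokens, keeping the
    // most recent 512 before the context is recalculated.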

    // Run the model on the given prompt, streaming output through the
    // callbacks. The response callback receives each generated token id and
    // its text; returning false from it stops generation. The recalculate
    // callback reports whether a context recalculation is in progress;
    // returning false from it cancels the recalculation.
    virtual void prompt(const std::string &prompt,
                        std::function<bool(int32_t, const std::string&)> response,
                        std::function<bool(bool)> recalculate,
                        PromptContext &ctx) = 0;

    // Set the number of threads used for generation; the default
    // implementation is single-threaded and ignores the request.
    virtual void setThreadCount(int32_t n_threads) {}
    virtual int32_t threadCount() { return 1; }

protected:
    // Rebuild the model's internal state after tokens have been erased from
    // the context window (see PromptContext::contextErase).
    virtual void recalculateContext(PromptContext &promptCtx,
                                    std::function<bool(bool)> recalculate) = 0;
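
    // A sketch of the shape such an implementation typically takes (an
    // assumption, not a requirement of the interface): re-evaluate the
    // tokens remaining in promptCtx.tokens in n_batch-sized chunks to
    // rebuild promptCtx.logits and n_past, invoking recalculate to report
    // progress and treating a false return as cancellation.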
};
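
// A minimal end-to-end usage sketch (the subclass `MyModel`, the model file
// name, and the thread count are assumptions for illustration):
//
//     MyModel model;
//     model.setThreadCount(4);
//     if (model.loadModel("ggml-model.bin")) {
//         LLModel::PromptContext ctx;
//         model.prompt("Hello, world!",
//             [](int32_t /*tokenId*/, const std::string &text) {
//                 std::cout << text;   // stream each response fragment
//                 return true;         // keep generating
//             },
//             [](bool /*isRecalc*/) {
//                 return true;         // allow context recalculation
//             },
//             ctx);
//     }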

#endif // LLMODEL_H