#define LLAMAMODEL_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE
#include "llamamodel_impl.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <random>
#include <sstream>
#include <stdexcept>
#include <string>
#include <thread>
#include <unordered_set>
#include <vector>

#include <llama.h>
#include <ggml.h>
#ifdef GGML_USE_KOMPUTE
#include <ggml-kompute.h>
#endif

using namespace std::string_literals;

// Maximum supported GGUF version
static constexpr int GGUF_VER_MAX = 3;

static const char * const modelType_ = "LLaMA";
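
// Verbose llama.cpp output is opt-in: it is enabled when the
// GPT4ALL_VERBOSE_LLAMACPP environment variable is set to a non-empty value.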
static bool llama_verbose() {
    const char* var = getenv("GPT4ALL_VERBOSE_LLAMACPP");
    return var && *var;
}

static void llama_log_callback(enum ggml_log_level level, const char *text, void *userdata) {
    (void)userdata;
    if (llama_verbose() || level <= GGML_LOG_LEVEL_ERROR) {
        fputs(text, stderr);
    }
}
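
// A local stand-in for llama.cpp's gpt_params; loadModel() only consults it for
// default values (RNG seed, mmap/mlock behavior, KV cache type).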
struct gpt_params {
    int32_t seed = -1;   // RNG seed
    int32_t n_keep = 0;  // number of tokens to keep from initial prompt

    // sampling parameters
    float tfs_z = 1.0f;     // 1.0 = disabled
    float typical_p = 1.0f; // 1.0 = disabled

    std::string prompt = "";

    enum ggml_type kv_type = GGML_TYPE_F16; // use f16 instead of f32 for memory kv

    bool use_mmap = true;   // use mmap for faster loads
    bool use_mlock = false; // use mlock to keep model in memory
};
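
// Applies the classic llama.cpp sampling chain to the logits at position `pos`:
// repetition penalty, then the top-k, tail-free, locally-typical and top-p filters
// (the middle two are called with 1.0, i.e. disabled), then temperature scaling,
// and finally a token is drawn from the remaining candidates.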
static int llama_sample_top_p_top_k(
        llama_context *ctx,
        const llama_token *last_n_tokens_data,
        int last_n_tokens_size,
        int top_k,
        float top_p,
        float temp,
        float repeat_penalty,
        int32_t pos) {
    auto logits = llama_get_logits_ith(ctx, pos);
    auto n_vocab = llama_n_vocab(llama_get_model(ctx));
    // Populate initial list of all candidates
    std::vector<llama_token_data> candidates;
    candidates.reserve(n_vocab);
    for (int token_id = 0; token_id < n_vocab; token_id++) {
        candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
    }
    llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};
    // Sample repeat penalty
    llama_sample_repetition_penalties(nullptr, &candidates_p, last_n_tokens_data, last_n_tokens_size, repeat_penalty, 0.0f, 0.0f);
    // Temperature sampling
    llama_sample_top_k(ctx, &candidates_p, top_k, 1);
    llama_sample_tail_free(ctx, &candidates_p, 1.0f, 1);
    llama_sample_typical(ctx, &candidates_p, 1.0f, 1);
    llama_sample_top_p(ctx, &candidates_p, top_p, 1);
    llama_sample_temp(ctx, &candidates_p, temp);
    return llama_sample_token(ctx, &candidates_p);
}
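
// Returns the required "general.architecture" string from already-loaded GGUF
// metadata; throws if the key is missing or is not a string.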
std::string get_arch_name(gguf_context *ctx_gguf) {
    const int kid = gguf_find_key(ctx_gguf, "general.architecture");
    if (kid == -1 || gguf_get_kv_type(ctx_gguf, kid) != GGUF_TYPE_STRING) {
        throw std::runtime_error("ERROR: Can't get general architecture from gguf file.");
    }
    return gguf_get_val_str(ctx_gguf, kid);
}
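
// Opens a GGUF file for metadata inspection only (no_alloc, so tensor data is not
// loaded) and rejects files newer than GGUF_VER_MAX.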
static gguf_context *load_gguf(const char *fname) {
    struct gguf_init_params params = {
        /*.no_alloc = */ true,
        /*.ctx      = */ nullptr,
    };
    gguf_context *ctx = gguf_init_from_file(fname, params);
    if (!ctx) {
        std::cerr << __func__ << ": gguf_init_from_file failed\n";
        return nullptr;
    }

    int gguf_ver = gguf_get_version(ctx);
    if (gguf_ver > GGUF_VER_MAX) {
        std::cerr << __func__ << ": unsupported gguf version: " << gguf_ver << "\n";
        gguf_free(ctx);
        return nullptr;
    }

    return ctx;
}
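
// Reads an architecture-scoped uint32 key (e.g. "llama.context_length") from a
// model file's GGUF metadata; returns -1 if the file or the key cannot be read.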
static int32_t get_arch_key_u32(std::string const &modelPath, std::string const &archKey) {
    auto * ctx = load_gguf(modelPath.c_str());

    int32_t value = -1;
    if (ctx) {
        auto arch = get_arch_name(ctx);
        auto key = arch + "." + archKey;
        int keyidx = gguf_find_key(ctx, key.c_str());
        if (keyidx != -1) {
            value = gguf_get_val_u32(ctx, keyidx);
        } else {
            std::cerr << __func__ << ": " << key << " not found in " << modelPath << "\n";
        }
    }

    gguf_free(ctx);
    return value;
}

struct LLamaPrivate {
    const std::string modelPath;
    bool modelLoaded;
    int device = -1;
    llama_model *model = nullptr;
    llama_context *ctx = nullptr;
    llama_model_params model_params;
    llama_context_params ctx_params;
    int64_t n_threads = 0;
    std::vector<LLModel::Token> end_tokens;
};

LLamaModel::LLamaModel()
    : d_ptr(new LLamaPrivate) {
    d_ptr->modelLoaded = false;
}

// default hparams (LLaMA 7B)
struct llama_file_hparams {
    uint32_t n_vocab = 32000;
    uint32_t n_embd  = 4096;
    uint32_t n_mult  = 256;
    uint32_t n_head  = 32;
    uint32_t n_layer = 32;
    uint32_t n_rot   = 64;
    enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;
};
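
// Rough memory estimate for loading a model: the file size plus an fp16 KV cache
// of n_embd * n_layer * 2 (K and V) * n_ctx elements (2 bytes each). Note that
// this still parses the legacy GGJT header and returns 0 for any other format.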
size_t LLamaModel::requiredMem(const std::string &modelPath, int n_ctx, int ngl) {
    // TODO(cebtenzzre): update to GGUF
    (void)ngl; // FIXME(cebtenzzre): use this value
    auto fin = std::ifstream(modelPath, std::ios::binary);
    fin.seekg(0, std::ios_base::end);
    size_t filesize = fin.tellg();
    fin.seekg(0, std::ios_base::beg);
    uint32_t magic = 0;
    fin.read(reinterpret_cast<char*>(&magic), sizeof(magic));
    if (magic != 0x67676a74) return 0;
    uint32_t version = 0;
    fin.read(reinterpret_cast<char*>(&version), sizeof(version));
    llama_file_hparams hparams;
    fin.read(reinterpret_cast<char*>(&hparams.n_vocab), sizeof(hparams.n_vocab));
    fin.read(reinterpret_cast<char*>(&hparams.n_embd), sizeof(hparams.n_embd));
    fin.read(reinterpret_cast<char*>(&hparams.n_head), sizeof(hparams.n_head));
    fin.read(reinterpret_cast<char*>(&hparams.n_layer), sizeof(hparams.n_layer));
    fin.read(reinterpret_cast<char*>(&hparams.n_rot), sizeof(hparams.n_rot));
    fin.read(reinterpret_cast<char*>(&hparams.ftype), sizeof(hparams.ftype));
    const size_t kvcache_element_size = 2; // fp16
    const size_t est_kvcache_size = hparams.n_embd * hparams.n_layer * 2u * n_ctx * kvcache_element_size;
    return filesize + est_kvcache_size;
}
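
// A model is "blacklisted" when its GGUF metadata matches a known-bad conversion;
// currently that is an OpenOrca Mistral build whose <|im_end|> token was replaced
// with a <dummy32000> placeholder.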
bool LLamaModel::isModelBlacklisted(const std::string &modelPath) {
    auto * ctx = load_gguf(modelPath.c_str());
    if (!ctx) {
        std::cerr << __func__ << ": failed to load " << modelPath << "\n";
        return false;
    }

    auto get_key = [ctx, &modelPath](const char *name) {
        int keyidx = gguf_find_key(ctx, name);
        if (keyidx == -1) {
            throw std::logic_error(name + " not found in "s + modelPath);
        }
        return keyidx;
    };

    bool res = false;
    try {
        std::string name(gguf_get_val_str(ctx, get_key("general.name")));
        int token_idx = get_key("tokenizer.ggml.tokens");
        int n_vocab = gguf_get_arr_n(ctx, token_idx);

        // check for known bad models
        if (name == "open-orca_mistral-7b-openorca"
            && n_vocab == 32002
            && gguf_get_arr_str(ctx, token_idx, 32000) == "<dummy32000>"s // should be <|im_end|>
        ) {
            res = true;
        }
    } catch (const std::logic_error &e) {
        std::cerr << __func__ << ": " << e.what() << "\n";
    }

    gguf_free(ctx);
    return res;
}

bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)
{
    d_ptr->modelLoaded = false;

    // clean up after previous loadModel()
    if (d_ptr->model) {
        llama_free_model(d_ptr->model);
        d_ptr->model = nullptr;
    }
    if (d_ptr->ctx) {
        llama_free(d_ptr->ctx);
        d_ptr->ctx = nullptr;
    }

    if (n_ctx < 8) {
        std::cerr << "warning: minimum context size is 8, using minimum size.\n";
        n_ctx = 8;
    }

    // -- load the model --

    gpt_params params;

    d_ptr->model_params = llama_model_default_params();

    d_ptr->model_params.use_mmap = params.use_mmap;
#if defined (__APPLE__)
    d_ptr->model_params.use_mlock = true;
#else
    d_ptr->model_params.use_mlock = params.use_mlock;
#endif

    d_ptr->model_params.progress_callback = &LLModel::staticProgressCallback;
    d_ptr->model_params.progress_callback_user_data = this;

#ifdef GGML_USE_METAL
    if (llama_verbose()) {
        std::cerr << "llama.cpp: using Metal" << std::endl;
    }

    // always fully offload on Metal
    // TODO(cebtenzzre): use this parameter to allow using more than 53% of system RAM to load a model
    d_ptr->model_params.n_gpu_layers = 100;
#elif defined(GGML_USE_KOMPUTE)
    if (d_ptr->device != -1) {
        d_ptr->model_params.main_gpu = d_ptr->device;
        d_ptr->model_params.n_gpu_layers = ngl;
    }
#endif

    d_ptr->model = llama_load_model_from_file_gpt4all(modelPath.c_str(), &d_ptr->model_params);
    if (!d_ptr->model) {
        fflush(stdout);
        d_ptr->device = -1;
        std::cerr << "LLAMA ERROR: failed to load model from " << modelPath << std::endl;
        return false;
    }

    const int n_ctx_train = llama_n_ctx_train(d_ptr->model);
    if (n_ctx > n_ctx_train) {
        std::cerr << "warning: model was trained on only " << n_ctx_train << " context tokens ("
                  << n_ctx << " specified)\n";
    }

    // -- initialize the context --

    d_ptr->ctx_params = llama_context_default_params();

    d_ptr->ctx_params.n_ctx  = n_ctx;
    d_ptr->ctx_params.seed   = params.seed;
    d_ptr->ctx_params.type_k = params.kv_type;
    d_ptr->ctx_params.type_v = params.kv_type;

    // The new batch API provides space for n_vocab*n_tokens logits. Tell llama.cpp early
    // that we want this many logits so the state serializes consistently.
    d_ptr->ctx_params.logits_all = true;

    d_ptr->n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
    d_ptr->ctx_params.n_threads       = d_ptr->n_threads;
    d_ptr->ctx_params.n_threads_batch = d_ptr->n_threads;

    d_ptr->ctx = llama_new_context_with_model(d_ptr->model, d_ptr->ctx_params);
    if (!d_ptr->ctx) {
        fflush(stdout);
        std::cerr << "LLAMA ERROR: failed to init context for model " << modelPath << std::endl;
        llama_free_model(d_ptr->model);
        d_ptr->model = nullptr;
        d_ptr->device = -1;
        return false;
    }

    d_ptr->end_tokens = {llama_token_eos(d_ptr->model)};

#ifdef GGML_USE_KOMPUTE
    if (usingGPUDevice() && ggml_vk_has_device()) {
        std::cerr << "llama.cpp: using Vulkan on " << ggml_vk_current_device().name << std::endl;
    }
#endif

    fflush(stdout);
    d_ptr->modelLoaded = true;
    return true;
}

void LLamaModel::setThreadCount(int32_t n_threads) {
    d_ptr->n_threads = n_threads;
    llama_set_n_threads(d_ptr->ctx, n_threads, n_threads);
}

int32_t LLamaModel::threadCount() const {
    return d_ptr->n_threads;
}

LLamaModel::~LLamaModel()
{
    if (d_ptr->ctx) {
        llama_free(d_ptr->ctx);
    }
    llama_free_model(d_ptr->model);
}

bool LLamaModel::isModelLoaded() const
{
    return d_ptr->modelLoaded;
}

size_t LLamaModel::stateSize() const
{
    return llama_get_state_size(d_ptr->ctx);
}

size_t LLamaModel::saveState(uint8_t *dest) const
{
    return llama_copy_state_data(d_ptr->ctx, dest);
}

size_t LLamaModel::restoreState(const uint8_t *src)
{
    // const_cast is required, see: https://github.com/ggerganov/llama.cpp/pull/1540
    return llama_set_state_data(d_ptr->ctx, const_cast<uint8_t*>(src));
}

std::vector<LLModel::Token> LLamaModel::tokenize(PromptContext &ctx, const std::string &str, bool special) const
{
    const bool wantBOS = ctx.n_past == 0 && ctx.tokens.empty();
    const bool useBOS = wantBOS && shouldAddBOS();
    auto strCat = wantBOS && !special ? " " + str : str; // insert leading space ourselves, llama.cpp fork doesn't anymore
    std::vector<LLModel::Token> fres(strCat.size()+4);
    auto fres_len = llama_tokenize(d_ptr->model, strCat.c_str(), strCat.length(), fres.data(), fres.size(), useBOS, special);
    fres.resize(fres_len);
    return fres;
}

std::string LLamaModel::tokenToString(Token id) const
{
    return llama_token_to_piece(d_ptr->ctx, id);
}
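
// Picks the next token from the logits of the last evaluated batch, feeding the
// most recent repeat_last_n tokens into the repetition penalty.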
LLModel::Token LLamaModel::sampleToken(PromptContext &promptCtx) const
{
    const size_t n_prev_toks = std::min((size_t) promptCtx.repeat_last_n, promptCtx.tokens.size());
    return llama_sample_top_p_top_k(d_ptr->ctx,
        promptCtx.tokens.data() + promptCtx.tokens.size() - n_prev_toks,
        n_prev_toks, promptCtx.top_k, promptCtx.top_p, promptCtx.temp,
        promptCtx.repeat_penalty, promptCtx.n_last_batch_tokens - 1);
}
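
// Evaluates a batch of prompt tokens. KV cache entries at positions >= ctx.n_past
// are removed first so the new tokens can occupy those slots.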
bool LLamaModel::evalTokens(PromptContext &ctx, const std::vector<int32_t> &tokens) const
{
    llama_kv_cache_seq_rm(d_ptr->ctx, 0, ctx.n_past, -1);

    llama_batch batch = llama_batch_init(tokens.size(), 0, 1);

    batch.n_tokens = tokens.size();
    ctx.n_last_batch_tokens = tokens.size();

    for (int32_t i = 0; i < batch.n_tokens; i++) {
        batch.token   [i] = tokens[i];
        batch.pos     [i] = ctx.n_past + i;
        batch.n_seq_id[i] = 1;
        batch.seq_id  [i][0] = 0;
        batch.logits  [i] = false;
    }

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

    int res = llama_decode(d_ptr->ctx, batch);
    llama_batch_free(batch);
    return res == 0;
}

int32_t LLamaModel::contextLength() const
{
    return llama_n_ctx(d_ptr->ctx);
}

const std::vector<LLModel::Token> &LLamaModel::endTokens() const
{
    return d_ptr->end_tokens;
}
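
// Whether a BOS token should be prepended: trust the model's own metadata when it
// says (llama_add_bos_token() returns -1 when unspecified), otherwise default to
// adding BOS only for SentencePiece-style vocabularies.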
bool LLamaModel::shouldAddBOS() const
{
    int add_bos = llama_add_bos_token(d_ptr->model);
    return add_bos != -1 ? bool(add_bos) : llama_vocab_type(d_ptr->model) == LLAMA_VOCAB_TYPE_SPM;
}

int32_t LLamaModel::maxContextLength(std::string const &modelPath) const
{
    return get_arch_key_u32(modelPath, "context_length");
}

int32_t LLamaModel::layerCount(std::string const &modelPath) const
{
    return get_arch_key_u32(modelPath, "block_count");
}
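
// Lists the Vulkan devices the Kompute backend reports as having at least
// memoryRequired bytes available, converted to LLModel::GPUDevice entries.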
std::vector<LLModel::GPUDevice> LLamaModel::availableGPUDevices(size_t memoryRequired) const
{
#ifdef GGML_USE_KOMPUTE
    size_t count = 0;
    auto * vkDevices = ggml_vk_available_devices(memoryRequired, &count);

    if (vkDevices) {
        std::vector<LLModel::GPUDevice> devices;
        devices.reserve(count);

        for (size_t i = 0; i < count; ++i) {
            auto & dev = vkDevices[i];
            devices.emplace_back(
                /* index    = */ dev.index,
                /* type     = */ dev.type,
                /* heapSize = */ dev.heapSize,
                /* name     = */ dev.name,
                /* vendor   = */ dev.vendor
            );
            ggml_vk_device_destroy(&dev);
        }

        free(vkDevices);
        return devices;
    }
#else
    std::cerr << __func__ << ": built without Kompute\n";
#endif

    return {};
}

bool LLamaModel::initializeGPUDevice(size_t memoryRequired, const std::string &name) const
{
#if defined(GGML_USE_KOMPUTE)
    ggml_vk_device device;
    bool ok = ggml_vk_get_device(&device, memoryRequired, name.c_str());
    if (ok) {
        d_ptr->device = device.index;
        return true;
    }
#else
    (void)memoryRequired;
    (void)name;
#endif
    return false;
}

bool LLamaModel::initializeGPUDevice(int device, std::string *unavail_reason) const
{
#if defined(GGML_USE_KOMPUTE)
    (void)unavail_reason;
    d_ptr->device = device;
    return true;
#else
    (void)device;
    if (unavail_reason) {
        *unavail_reason = "built without Kompute";
    }
    return false;
#endif
}

bool LLamaModel::hasGPUDevice()
{
#if defined(GGML_USE_KOMPUTE)
    return d_ptr->device != -1;
#else
    return false;
#endif
}

bool LLamaModel::usingGPUDevice()
{
#if defined(GGML_USE_KOMPUTE)
    return hasGPUDevice() && d_ptr->model_params.n_gpu_layers > 0;
#elif defined(GGML_USE_METAL)
    return true;
#else
    return false;
#endif
}

#if defined(_WIN32)
#define DLL_EXPORT __declspec(dllexport)
#else
#define DLL_EXPORT __attribute__ ((visibility ("default")))
#endif

extern "C" {
DLL_EXPORT bool is_g4a_backend_model_implementation() {
    return true;
}

DLL_EXPORT const char *get_model_type() {
    return modelType_;
}

DLL_EXPORT const char *get_build_variant() {
    return GGML_BUILD_VARIANT;
}
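
// magic_match() decides whether this backend claims a model file: the file must be
// readable GGUF and its "general.architecture" must be one this build supports.
// GPT-J and BERT models are declined quietly, since other modules handle them.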
DLL_EXPORT bool magic_match(const char *fname) {
    auto * ctx = load_gguf(fname);
    if (!ctx) {
        std::cerr << __func__ << ": failed to load " << fname << "\n";
        return false;
    }
    auto arch = get_arch_name(ctx);

    bool valid = true;

    static const std::vector<const char *> known_arches {
        "baichuan", "bloom", "codeshell", "falcon", "gemma", "gpt2", "llama", "mpt", "orion", "persimmon", "phi2",
        "plamo", "qwen", "qwen2", "refact", "stablelm", "starcoder"
    };

    if (std::find(known_arches.begin(), known_arches.end(), arch) == known_arches.end()) {
        // not supported by this version of llama.cpp
        if (!(arch == "gptj" || arch == "bert")) { // we support these via other modules
            std::cerr << __func__ << ": unsupported model architecture: " << arch << "\n";
        }
        valid = false;
    }

    gguf_free(ctx);
    return valid;
}

DLL_EXPORT LLModel *construct() {
    llama_log_set(llama_log_callback, nullptr);
    return new LLamaModel;
}
}