#include "mpt.h"
#include "llama.cpp/ggml.h"

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <map>
#include <random>
#include <string>
#include <vector>
#include <iostream>
#include <unistd.h>
#include <sstream>
#include <thread>
#include <unordered_set>
#include <regex>

static const size_t MB = 1024*1024;

// default hparams (MPT 7B)
struct mpt_hparams {
    int32_t n_vocab = 50432;
    int32_t n_ctx   = 2048;
    int32_t n_embd  = 4096;
    int32_t n_head  = 32;
    int32_t n_layer = 32;
    float alibi_bias_max = 8;
    float clip_qkv = 0;
    int32_t expand = 4;
    int32_t f16 = 1;
};

struct mpt_layer {
    // normalization
    struct ggml_tensor * norm_1_w;
    struct ggml_tensor * norm_2_w;

    // attention
    struct ggml_tensor * attn_Wqkv_w;
    struct ggml_tensor * attn_out_proj_w;

    // ff
    struct ggml_tensor * ffn_up_proj_w;
    struct ggml_tensor * ffn_down_proj_w;
};
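
// Simple resizable byte buffer; used to back ggml contexts (e.g. the KV cache below).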
struct mpt_buffer {
    uint8_t * addr = NULL;
    size_t size = 0;

    void resize(size_t size) {
        delete[] addr;
        addr = new uint8_t[size];
        this->size = size;
    }

    ~mpt_buffer() {
        fflush(stdout);
        delete[] addr;
    }
};

struct mpt_kv_cache {
    struct ggml_tensor * k;
    struct ggml_tensor * v;

    struct ggml_context * ctx = NULL;

    mpt_buffer buf;

    int n; // number of tokens currently in the cache

    ~mpt_kv_cache() {
        if (ctx) {
            ggml_free(ctx);
        }
    }
};

struct mpt_model {
    mpt_hparams hparams;

    // normalization
    struct ggml_tensor * norm_f_w;

    struct ggml_tensor * wte; // token embedding
    // mpt does weight tying, so wte is also used to produce the output logits

    std::vector<mpt_layer> layers;

    struct mpt_kv_cache kv_self;
    struct ggml_context * ctx;
    std::map<std::string, struct ggml_tensor *> tensors;

    mpt_buffer buf;

    ~mpt_model() {
        if (ctx) {
            ggml_free(ctx);
        }
    }
};
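
// Initialize the self-attention KV cache: one K vector and one V vector of n_embd values
// per layer per context position, plus a small margin (2 MB) for ggml object overhead.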
static bool kv_cache_init(
        const struct mpt_hparams & hparams,
        struct mpt_kv_cache & cache,
        ggml_type wtype,
        int n_ctx) {
    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;

    const int64_t n_mem      = (int64_t)n_layer*n_ctx;
    const int64_t n_elements = n_embd*n_mem;

    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);

    struct ggml_init_params params;
    params.mem_size   = cache.buf.size;
    params.mem_buffer = cache.buf.addr;
    params.no_alloc   = false;

    cache.ctx = ggml_init(params);

    if (!cache.ctx) {
        fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
        return false;
    }

    cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
    cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);

    return true;
}

struct mpt_vocab {
    using id    = int32_t;
    using token = std::string;

    std::map<token, id> token_to_id;
    std::map<id, token> id_to_token;
    std::vector<std::string> special_tokens;

    void add_special_token(const std::string &token) {
        special_tokens.push_back(token);
    }
};
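
// Escape regex metacharacters so a special token can be matched literally
// when building the special-token pattern in mpt_tokenize().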
std::string regex_escape(const std::string &s) {
    static const std::regex metacharacters(R"([\.\^\$\-\+\(\)\[\]\{\}\|\?\*])");
    return std::regex_replace(s, metacharacters, "\\$&");
}

// load the model's weights from a stream
bool mpt_model_load(const std::string &fname, std::istream &fin, mpt_model & model, mpt_vocab & vocab) {
    printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());

    // verify magic
    {
        uint32_t magic;
        fin.read((char *) &magic, sizeof(magic));
        if (magic != 0x67676d6d) {
            fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
            return false;
        }
    }

    // load hparams
    {
        auto & hparams = model.hparams;

        fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
        fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
        fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
        fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
        fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
        fin.read((char *) &hparams.alibi_bias_max, sizeof(hparams.alibi_bias_max));
        fin.read((char *) &hparams.clip_qkv, sizeof(hparams.clip_qkv));
        fin.read((char *) &hparams.f16, sizeof(hparams.f16));

        printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
        printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
        printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
        printf("%s: n_head = %d\n", __func__, hparams.n_head);
        printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
        printf("%s: alibi_bias_max = %f\n", __func__, hparams.alibi_bias_max);
        printf("%s: clip_qkv = %f\n", __func__, hparams.clip_qkv);
        printf("%s: ftype = %d\n", __func__, hparams.f16);
    }

    // load vocab
    {
        int32_t n_vocab = model.hparams.n_vocab;
        fin.read((char *) &n_vocab, sizeof(n_vocab));

        if (n_vocab != model.hparams.n_vocab) {
            fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
                    __func__, fname.c_str(), n_vocab, model.hparams.n_vocab);
            return false;
        }

        std::string word;
        for (int i = 0; i < n_vocab; i++) {
            uint32_t len;
            fin.read((char *) &len, sizeof(len));
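            // the high bit of the stored length flags a special token (e.g. "<|im_end|>" for mpt-7b-chat)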
            bool special = false;
            if (len & (1<<31)) {
                len = len &~ (1<<31);
                special = true;
            }

            if (len > 0) {
                word.resize(len);
                fin.read((char *) word.data(), len);
                vocab.token_to_id[word] = i;
                vocab.id_to_token[i] = word;
            }

            // TODO: this only kind-of works; the tokenizer can still split special tokens incorrectly
            if (special) {
                vocab.add_special_token(word);
            }
        }
    }

    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
    // in order to save memory and also to speed up the computation
    ggml_type wtype = GGML_TYPE_COUNT;
    switch (model.hparams.f16) {
        case 0: wtype = GGML_TYPE_F32;  break;
        case 1: wtype = GGML_TYPE_F16;  break;
        case 2: wtype = GGML_TYPE_Q4_0; break;
        case 3: wtype = GGML_TYPE_Q4_1; break;
        case 5: wtype = GGML_TYPE_Q4_2; break;
        default: {
            fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
                    __func__, fname.c_str(), model.hparams.f16);
            return false;
        }
    }

    auto & ctx = model.ctx;

    size_t ctx_size = 0;

    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;
        const int n_vocab = hparams.n_vocab;
        const int expand  = hparams.expand;

        ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_w

        ctx_size += n_embd*n_vocab*ggml_type_sizef(GGML_TYPE_F32); // wte

        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // norm_1_w
        ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // norm_2_w

        ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_sizef(wtype)); // attn_Wqkv_w
        ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // attn_out_proj_w

        ctx_size += n_layer*(expand*n_embd*n_embd*ggml_type_sizef(wtype)); // ffn_up_proj_w
        ctx_size += n_layer*(expand*n_embd*n_embd*ggml_type_sizef(wtype)); // ffn_down_proj_w

        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v

        // TODO probably less now?
        ctx_size += (5 + 10*n_layer)*256; // object overhead

        printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
    }

    // create the ggml context
    {
        struct ggml_init_params params = {
            .mem_size   = ctx_size,
            .mem_buffer = NULL,
            .no_alloc   = false,
        };

        model.ctx = ggml_init(params);
        if (!model.ctx) {
            fprintf(stderr, "%s: ggml_init() failed\n", __func__);
            return false;
        }
    }

    // prepare memory for the weights
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;
        const int n_vocab = hparams.n_vocab;
        const int expand  = hparams.expand;

        model.layers.resize(n_layer);

        model.wte      = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
        model.norm_f_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

        // map by name
        model.tensors["transformer.wte.weight"]    = model.wte;
        model.tensors["transformer.norm_f.weight"] = model.norm_f_w;

        for (int i = 0; i < n_layer; ++i) {
            auto & layer = model.layers[i];

            layer.norm_1_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
            layer.norm_2_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);

            layer.attn_Wqkv_w     = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd * 3);
            layer.attn_out_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
            layer.ffn_up_proj_w   = ggml_new_tensor_2d(ctx, wtype, n_embd, expand*n_embd);
            layer.ffn_down_proj_w = ggml_new_tensor_2d(ctx, wtype, expand*n_embd, n_embd);

            // map by name
            model.tensors["transformer.blocks." + std::to_string(i) + ".norm_1.weight"] = layer.norm_1_w;
            model.tensors["transformer.blocks." + std::to_string(i) + ".norm_2.weight"] = layer.norm_2_w;
            model.tensors["transformer.blocks." + std::to_string(i) + ".attn.Wqkv.weight"] = layer.attn_Wqkv_w;
            model.tensors["transformer.blocks." + std::to_string(i) + ".attn.out_proj.weight"] = layer.attn_out_proj_w;

            model.tensors["transformer.blocks." + std::to_string(i) + ".ffn.up_proj.weight"] = layer.ffn_up_proj_w;
            model.tensors["transformer.blocks." + std::to_string(i) + ".ffn.down_proj.weight"] = layer.ffn_down_proj_w;
        }
    }

    // key + value memory
    {
        const auto & hparams = model.hparams;

        const int n_embd  = hparams.n_embd;
        const int n_layer = hparams.n_layer;
        const int n_ctx   = hparams.n_ctx;

        const int n_mem      = n_layer*n_ctx;
        const int n_elements = n_embd*n_mem;

        if (!kv_cache_init(hparams, model.kv_self, GGML_TYPE_F16, model.hparams.n_ctx)) {
            fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
            ggml_free(ctx);
            return false;
        }

        const size_t memory_size = ggml_nbytes(model.kv_self.k) + ggml_nbytes(model.kv_self.v);
        printf("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
    }

    // load weights
    {
        int n_tensors = 0;
        size_t total_size = 0;

        printf("%s: ", __func__);

        while (true) {
            int32_t n_dims;
            int32_t length;
            int32_t ttype;

            fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
            fin.read(reinterpret_cast<char *>(&length), sizeof(length));
            fin.read(reinterpret_cast<char *>(&ttype), sizeof(ttype));

            if (fin.eof()) {
                break;
            }

            int32_t nelements = 1;
            int32_t ne[2] = { 1, 1 };
            for (int i = 0; i < n_dims; ++i) {
                fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
                nelements *= ne[i];
            }

            std::string name(length, 0);
            fin.read(&name[0], length);

            if (model.tensors.find(name.data()) == model.tensors.end()) {
                fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
                return false;
            }

            auto tensor = model.tensors[name.data()];
            if (ggml_nelements(tensor) != nelements) {
                fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
                return false;
            }

            if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
                fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
                        __func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]);
                return false;
            }

            // for debugging
            if (0) {
                printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
            }

            const size_t bpe = ggml_type_size(ggml_type(ttype));

            if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
                fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
                        __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
                return false;
            }

            fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));

            //printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
            total_size += ggml_nbytes(tensor);
            if (++n_tensors % 8 == 0) {
                printf(".");
                fflush(stdout);
            }
        }

        printf(" done\n");

        printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
    }

    return true;
}

// load the model's weights from a file path
bool mpt_model_load(const std::string & fname, mpt_model & model, mpt_vocab & vocab) {

    auto fin = std::ifstream(fname, std::ios::binary);
    if (!fin) {
        fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
        return false;
    }

    bool loaded = mpt_model_load(fname, fin, model, vocab);
    fin.close();
    return loaded;
}

bool mpt_eval(
        mpt_model & model,
        const int n_threads,
        const int n_past,
        const std::vector<int> & embd_inp,
        std::vector<float> & embd_w,
        size_t & mem_per_token) {
    const int N = embd_inp.size();

    const auto & hparams = model.hparams;

    const int n_embd  = hparams.n_embd;
    const int n_layer = hparams.n_layer;
    const int n_ctx   = hparams.n_ctx;
    const int n_head  = hparams.n_head;
    const int n_vocab = hparams.n_vocab;
    const int expand  = hparams.expand;

    const int d_key = n_embd/n_head;

    static size_t buf_size = 256u*1024*1024;
    static void * buf = malloc(buf_size);

    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
        const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
        //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);

        // reallocate
        buf_size = buf_size_new;
        buf = realloc(buf, buf_size);
        if (buf == nullptr) {
            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
            return false;
        }
    }

    struct ggml_init_params params = {
        .mem_size   = buf_size,
        .mem_buffer = buf,
        .no_alloc   = false,
    };

    struct ggml_context * ctx0 = ggml_init(params);
    struct ggml_cgraph gf = { .n_threads = n_threads };

    struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
    memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));

    // wte
    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);

    for (int il = 0; il < n_layer; ++il) {

        struct ggml_tensor * inpSA = inpL;
        struct ggml_tensor * cur = inpSA;
        // self-attention
        {
            // norm1
            cur = ggml_norm(ctx0, cur);
            cur = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model.layers[il].norm_1_w, cur),
                    cur);
            // compute QKV
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].attn_Wqkv_w,
                    cur);

            // TODO: clip_qkv
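            // Wqkv projects each token to a 3*n_embd vector; slice it into n_embd x N views for Q, K and V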
            struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*ggml_element_size(cur)*n_embd));
            struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*ggml_element_size(cur)*n_embd));
            struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*ggml_element_size(cur)*n_embd));

            // TODO: qk_ln? (seems to be False in MPT-7B configs)
            {
                Vcur = ggml_transpose(ctx0, Vcur);
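
                // append this batch's K and V to the cache: each layer owns n_ctx slots;
                // K is stored row-per-token, V is stored transposed (one row of n_ctx positions per embedding dim)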
                struct ggml_tensor * k = ggml_view_1d(ctx0, model.kv_self.k, N*n_embd, (ggml_element_size(model.kv_self.k)*n_embd)*(il*n_ctx + n_past));
                struct ggml_tensor * v = ggml_view_2d(ctx0, model.kv_self.v, N, n_embd,
                        (   n_ctx)*ggml_element_size(model.kv_self.v),
                        (il*n_ctx)*ggml_element_size(model.kv_self.v)*n_embd + n_past*ggml_element_size(model.kv_self.v));

                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
                ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
            }
            // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
            struct ggml_tensor * Q =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, N),
                        0, 2, 1, 3);

            struct ggml_tensor * K =
                ggml_permute(ctx0,
                        ggml_reshape_3d(ctx0,
                            ggml_view_1d(ctx0, model.kv_self.k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.kv_self.k)*n_embd),
                            n_embd/n_head, n_head, n_past + N),
                        0, 2, 1, 3);

            // K * Q
            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);

            // KQ_scaled = KQ / sqrt(n_embd/n_head)
            struct ggml_tensor * KQ_scaled =
                ggml_scale(ctx0,
                        KQ,
                        ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                        );
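
            // MPT uses ALiBi: a per-head linear bias is added to the attention scores
            // in place of positional embeddings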
            // Alibi
            struct ggml_tensor * KQ_scaled_biased = ggml_alibi(ctx0, ggml_cont(ctx0, KQ_scaled), n_past, n_head);

            // KQ_masked = mask_past(KQ_scaled)
            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_biased, n_past);

            // KQ = soft_max(KQ_masked)
            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);

            // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
            struct ggml_tensor * V =
                ggml_view_3d(ctx0, model.kv_self.v,
                        n_past + N, n_embd/n_head, n_head,
                        n_ctx*ggml_element_size(model.kv_self.v),
                        n_ctx*ggml_element_size(model.kv_self.v)*n_embd/n_head,
                        il*n_ctx*ggml_element_size(model.kv_self.v)*n_embd);

            // KQV = transpose(V) * KQ_soft_max
            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);

            // KQV_merged = KQV.permute(0, 2, 1, 3)
            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);

            // cur = KQV_merged.contiguous().view(n_embd, N)
            cur = ggml_cpy(ctx0,
                    KQV_merged,
                    ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));

            // projection (no bias)
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].attn_out_proj_w,
                    cur);
        }

        // residual
        struct ggml_tensor * resSA = ggml_add(ctx0, cur, inpSA);
        // feed-forward network
        {
            cur = resSA;
            // norm2
            cur = ggml_norm(ctx0, cur);
            cur = ggml_mul(ctx0,
                    ggml_repeat(ctx0, model.layers[il].norm_2_w, cur),
                    cur);
            // ffn
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].ffn_up_proj_w,
                    cur);
            cur = ggml_gelu(ctx0, cur);
            cur = ggml_mul_mat(ctx0,
                    model.layers[il].ffn_down_proj_w,
                    cur);
        }

        // self-attention + FF
        inpL = ggml_add(ctx0, cur, resSA);
    }

    struct ggml_tensor * out = inpL;
    // -> logits
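    // final layernorm, then project through the tied token embedding (wte) to get vocab logits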
    {
        out = ggml_norm(ctx0, out);
        out = ggml_mul(ctx0,
                ggml_repeat(ctx0, model.norm_f_w, out),
                out);
        out = ggml_mul_mat(ctx0, model.wte, out);
    }

    // run the computation
    ggml_build_forward_expand(&gf, out);
    ggml_graph_compute(ctx0, &gf);

    // return result for just the last token
    embd_w.resize(n_vocab);
    memcpy(embd_w.data(), (float *) ggml_get_data(out) + (n_vocab*(N-1)), sizeof(float)*n_vocab);

    if (mem_per_token == 0) {
        mem_per_token = ggml_used_mem(ctx0)/N;
    }
    //printf("used_mem = %zu\n", ggml_used_mem(ctx0));

    ggml_free(ctx0);

    return true;
}

std::vector<int> mpt_tokenize_inner(const mpt_vocab & vocab, const std::string & text) {
    // taken from stablelm example in ggml
    // they both use the gpt-neox tokenizer
    // not sure if this is entirely right?
    std::vector<std::string> words;

    // first split the text into words
    {
        std::string str = text;
        std::string pat = R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)";
        std::regex re(pat);
        std::smatch m;

        while (std::regex_search(str, m, re)) {
            for (auto x : m) {
                words.push_back(x);
            }
            str = m.suffix();
        }
    }

    // find the longest tokens that form the words:
    std::vector<mpt_vocab::id> tokens;
    for (const auto & word : words) {
        if (word.size() == 0) continue;

        int i = 0;
        int n = word.size();
        while (i < n) {
            int j = n;
            while (j > i) {
                auto it = vocab.token_to_id.find(word.substr(i, j-i));
                if (it != vocab.token_to_id.end()) {
                    tokens.push_back(it->second);
                    i = j;
                    break;
                }
                --j;
            }
            if (i == n) {
                break;
            }
            if (j == i) {
                auto sub = word.substr(i, 1);
                if (vocab.token_to_id.find(sub) != vocab.token_to_id.end()) {
                    tokens.push_back(vocab.token_to_id.at(sub));
                } else {
                    fprintf(stderr, "%s: unknown token '%s'\n", __func__, sub.data());
                }
                ++i;
            }
        }
    }

    return tokens;
}

std::vector<mpt_vocab::id> mpt_tokenize(const mpt_vocab & vocab, const std::string & text) {
    // Generate the subpattern from the special_tokens vector if it's not empty
    if (!vocab.special_tokens.empty()) {
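        // split on special tokens first: each special token becomes a single id and the
        // text between matches goes through the regular tokenizer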
        std::vector<mpt_vocab::id> out;
        std::vector<std::string> chunks;
        std::string str = text;
        std::string special_tokens_subpattern;
        for (const auto &token : vocab.special_tokens) {
            if (!special_tokens_subpattern.empty()) {
                special_tokens_subpattern += "|";
            }
            special_tokens_subpattern += regex_escape(token);
        }
        std::regex re(special_tokens_subpattern);
        std::smatch m;
        while (std::regex_search(str, m, re)) {
            auto tok = vocab.token_to_id.find(m.str());
            if (tok != vocab.token_to_id.end()) {
                auto tokid = tok->second;
                auto pfxtoks = mpt_tokenize_inner(vocab, m.prefix());
                out.insert(out.end(), pfxtoks.begin(), pfxtoks.end());
                out.push_back(tokid);
                str = m.suffix();
            }
        }
        if (!str.empty()) {
            auto tokrest = mpt_tokenize_inner(vocab, str);
            out.insert(out.end(), tokrest.begin(), tokrest.end());
        }
        return out;
    } else {
        return mpt_tokenize_inner(vocab, text);
    }
}

#define MPT_MAX_RNG_STATE 64*1024
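
// State is serialized as: rng state length, rng state text (padded to MPT_MAX_RNG_STATE),
// kv buffer size, kv token count, then the raw kv cache buffer.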
size_t mpt_get_state_size(const mpt_model &model)
{
    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
    // for reference, std::mt19937(1337) serializes to 6701 bytes.
    const size_t s_rng_size = sizeof(size_t);
    const size_t s_rng      = MPT_MAX_RNG_STATE;
    const size_t s_kv_size  = sizeof(size_t);
    const size_t s_kv_ntok  = sizeof(int);
    const size_t s_kv       = model.kv_self.buf.size;
    const size_t s_total = (
        + s_rng_size
        + s_rng
        + s_kv_size
        + s_kv_ntok
        + s_kv
    );
    fflush(stdout);
    return s_total;
}

size_t mpt_copy_state_data(const mpt_model &model, const std::mt19937 &rng, uint8_t *dest)
{
    uint8_t * out = dest;
    fflush(stdout);
    // copy rng
    {
        std::stringstream rng_ss;
        rng_ss << rng;

        const size_t rng_size = rng_ss.str().size();
        char rng_buf[MPT_MAX_RNG_STATE];

        memset(&rng_buf[0], 0, MPT_MAX_RNG_STATE);
        memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());

        memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size);
        memcpy(out, &rng_buf[0], MPT_MAX_RNG_STATE); out += MPT_MAX_RNG_STATE;
    }

    // copy kv cache
    {
        const size_t kv_size = model.kv_self.buf.size;
        const int kv_ntok = model.kv_self.n;

        memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size);
        memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok);

        if (kv_size) {
            memcpy(out, model.kv_self.buf.addr, kv_size); out += kv_size;
        }
    }

    const size_t written = out - dest;
    const size_t expected = mpt_get_state_size(model);
    assert(written == expected);
    fflush(stdout);
    return written;
}
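
// Sample one token id: scale logits by 1/temp, apply the repetition penalty to tokens seen
// in last_n_tokens, keep the top_k candidates, soft-max them, then apply the top_p (nucleus)
// cutoff and draw from the remaining distribution.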
mpt_vocab::id mpt_sample_top_k_top_p(
        const mpt_vocab & vocab,
        const size_t actualVocabSize,
        const int32_t * last_n_tokens_data,
        int last_n_tokens_size,
        const std::vector<float> logits,
        int top_k,
        double top_p,
        double temp,
        float repeat_penalty,
        std::mt19937 & rng) {
    int n_logits = actualVocabSize;

    const auto last_n_tokens = std::vector<int32_t>(last_n_tokens_data, last_n_tokens_data + last_n_tokens_size);
    const auto * plogits = logits.data() + logits.size() - n_logits;

    std::vector<std::pair<double, mpt_vocab::id>> logits_id;
    logits_id.reserve(n_logits);

    {
        const float scale = 1.0f/temp;
        for (int i = 0; i < n_logits; ++i) {
            // repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858)
            // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main
            if (std::find(last_n_tokens.begin(), last_n_tokens.end(), i) != last_n_tokens.end()) {
                // if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
                if (plogits[i] < 0.0f) {
                    logits_id.push_back(std::make_pair(plogits[i]*scale*repeat_penalty, i));
                } else {
                    logits_id.push_back(std::make_pair(plogits[i]*scale/repeat_penalty, i));
                }
            } else {
                logits_id.push_back(std::make_pair(plogits[i]*scale, i));
            }
        }
    }

    // find the top K tokens
    std::partial_sort(
            logits_id.begin(),
            logits_id.begin() + top_k, logits_id.end(),
            [](const std::pair<double, mpt_vocab::id> & a, const std::pair<double, mpt_vocab::id> & b) {
        return a.first > b.first;
    });

    logits_id.resize(top_k);

    double maxl = -INFINITY;
    for (const auto & kv : logits_id) {
        maxl = std::max(maxl, kv.first);
    }

    // compute probs for the top K tokens
    std::vector<double> probs;
    probs.reserve(logits_id.size());

    double sum = 0.0;
    for (const auto & kv : logits_id) {
        double p = exp(kv.first - maxl);
        probs.push_back(p);
        sum += p;
    }

    // normalize the probs
    for (auto & p : probs) {
        p /= sum;
    }

    if (top_p < 1.0f) {
        double cumsum = 0.0f;
        for (int i = 0; i < top_k; i++) {
            cumsum += probs[i];
            if (cumsum >= top_p) {
                top_k = i + 1;
                probs.resize(top_k);
                logits_id.resize(top_k);
                break;
            }
        }

        cumsum = 1.0/cumsum;
        for (int i = 0; i < (int) probs.size(); i++) {
            probs[i] *= cumsum;
        }
    }

    //printf("\n");
    //for (int i = 0; i < (int) probs.size(); i++) {
    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
    //}
    //exit(0);

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    int idx = dist(rng);

    return logits_id[idx].second;
}

size_t mpt_set_state_data(mpt_model *model, std::mt19937 *rng, const uint8_t *src)
{
    const uint8_t * in = src;

    // set rng
    {
        size_t rng_size;
        char rng_buf[MPT_MAX_RNG_STATE];

        memcpy(&rng_size, in, sizeof(rng_size)); in += sizeof(rng_size);
        memcpy(&rng_buf[0], in, MPT_MAX_RNG_STATE); in += MPT_MAX_RNG_STATE;

        std::stringstream rng_ss;
        rng_ss.str(std::string(&rng_buf[0], rng_size));
        rng_ss >> *rng;

        assert(rng_ss.fail() == false);
    }

    // set kv cache
    {
        size_t kv_size;
        int kv_ntok;

        memcpy(&kv_size, in, sizeof(kv_size)); in += sizeof(kv_size);
        memcpy(&kv_ntok, in, sizeof(kv_ntok)); in += sizeof(kv_ntok);

        if (kv_size) {
            assert(model->kv_self.buf.size == kv_size);

            void * k_data = model->kv_self.k->data; // remember data pointers
            void * v_data = model->kv_self.v->data; // because their value is stored in buf and overwritten by memcpy

            memcpy(model->kv_self.buf.addr, in, kv_size); in += kv_size;

            model->kv_self.k->data = k_data; // restore correct data pointers
            model->kv_self.v->data = v_data;
        }

        model->kv_self.n = kv_ntok;
    }

    const size_t nread = in - src;
    const size_t expected = mpt_get_state_size(*model);
    assert(nread == expected);
    fflush(stdout);
    return nread;
}

struct MPTPrivate {
    const std::string modelPath;
    bool modelLoaded;
    mpt_vocab vocab;
    mpt_model *model = nullptr;
    int64_t n_threads = 0;
    size_t mem_per_token = 0;
    std::mt19937 rng;
    bool has_im_end = false;
};

MPT::MPT()
    : d_ptr(new MPTPrivate) {

    d_ptr->model = new mpt_model;
    d_ptr->modelLoaded = false;
}

bool MPT::loadModel(const std::string &modelPath) {
    std::mt19937 rng(time(NULL));
    d_ptr->rng = rng;

    auto fin = std::ifstream(modelPath, std::ios::binary);

    // load the model
    if (!mpt_model_load(modelPath, fin, *d_ptr->model, d_ptr->vocab)) {
        std::cerr << "MPT ERROR: failed to load model from " << modelPath;
        return false;
    }

    d_ptr->n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
    d_ptr->modelLoaded = true;
    d_ptr->has_im_end = d_ptr->vocab.token_to_id.find("<|im_end|>") != d_ptr->vocab.token_to_id.end();
    fflush(stdout);
    return true;
}

void MPT::setThreadCount(int32_t n_threads) {
    d_ptr->n_threads = n_threads;
}

int32_t MPT::threadCount() {
    return d_ptr->n_threads;
}

MPT::~MPT()
{
    delete d_ptr->model;
}

bool MPT::isModelLoaded() const
{
    return d_ptr->modelLoaded;
}

size_t MPT::stateSize() const
{
    return mpt_get_state_size(*d_ptr->model);
}

size_t MPT::saveState(uint8_t *dest) const
{
    return mpt_copy_state_data(*d_ptr->model, d_ptr->rng, dest);
}

size_t MPT::restoreState(const uint8_t *src)
{
    return mpt_set_state_data(d_ptr->model, &d_ptr->rng, src);
}

void MPT::prompt(const std::string &prompt,
        std::function<bool(int32_t)> promptCallback,
        std::function<bool(int32_t, const std::string&)> responseCallback,
        std::function<bool(bool)> recalculateCallback,
        PromptContext &promptCtx) {

    if (!isModelLoaded()) {
        std::cerr << "MPT ERROR: prompt won't work with an unloaded model!\n";
        return;
    }

    const int64_t t_main_start_us = ggml_time_us();

    int64_t t_sample_us  = 0;
    int64_t t_predict_us = 0;
    int64_t t_prompt_us  = 0;

    // tokenize the prompt
    std::vector<int> embd_inp = mpt_tokenize(d_ptr->vocab, prompt);

    // save the context size
    promptCtx.n_ctx = d_ptr->model->hparams.n_ctx;

    if ((int) embd_inp.size() > promptCtx.n_ctx - 4) {
        responseCallback(-1, "ERROR: The prompt size exceeds the context window size and cannot be processed.");
        std::cerr << "MPT ERROR: The prompt is " << embd_inp.size() <<
            " tokens and the context window is " << promptCtx.n_ctx << "!\n";
        return;
    }

    promptCtx.n_predict = std::min(promptCtx.n_predict, promptCtx.n_ctx - (int) embd_inp.size());
    promptCtx.n_past = std::min(promptCtx.n_past, promptCtx.n_ctx);

    // determine the required inference memory per token:
    static bool initialized = false;
    static std::vector<int> p_instruct;
    static std::vector<int> r_instruct;
    if (!initialized) {
        mpt_eval(*d_ptr->model, d_ptr->n_threads, 0, { 0, 1, 2, 3 }, promptCtx.logits,
            d_ptr->mem_per_token);
        initialized = true;
    }

    // process the prompt in batches
    size_t i = 0;
    const int64_t t_start_prompt_us = ggml_time_us();
    while (i < embd_inp.size()) {
        size_t batch_end = std::min(i + promptCtx.n_batch, embd_inp.size());
        std::vector<int> batch(embd_inp.begin() + i, embd_inp.begin() + batch_end);

        // Check if the context has run out...
        if (promptCtx.n_past + batch.size() > promptCtx.n_ctx) {
            const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase;
            // Erase the first percentage of context from the tokens...
            std::cerr << "MPT: reached the end of the context window so resizing\n";
            promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint);
            promptCtx.n_past = promptCtx.tokens.size();
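            // rebuild the KV cache by re-evaluating the tokens that remain after the erase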
            recalculateContext(promptCtx, recalculateCallback);
            assert(promptCtx.n_past + batch.size() <= promptCtx.n_ctx);
        }

        if (!mpt_eval(*d_ptr->model, d_ptr->n_threads, promptCtx.n_past, batch, promptCtx.logits,
            d_ptr->mem_per_token)) {
            std::cerr << "MPT ERROR: Failed to process prompt\n";
            return;
        }

        size_t tokens = batch_end - i;
        for (size_t t = 0; t < tokens; ++t) {
            if (promptCtx.tokens.size() == promptCtx.n_ctx)
                promptCtx.tokens.erase(promptCtx.tokens.begin());
            promptCtx.tokens.push_back(batch.at(t));
            if (!promptCallback(batch.at(t)))
                return;
        }
        promptCtx.n_past += batch.size();
        i = batch_end;
    }
    t_prompt_us += ggml_time_us() - t_start_prompt_us;

    int p_instructFound = 0;
    int r_instructFound = 0;

    std::string cachedResponse;
    std::vector<int> cachedTokens;
    std::unordered_set<std::string> reversePrompts
        = { "### Instruction", "### Prompt", "### Response", "### Human", "### Assistant" };

    // predict next tokens
    int32_t totalPredictions = 0;
    for (int i = 0; i < promptCtx.n_predict; i++) {

        // sample next token
        const int n_vocab = d_ptr->model->hparams.n_vocab;
        int id = 0;
        {
            const int64_t t_start_sample_us = ggml_time_us();
            id = mpt_sample_top_k_top_p(d_ptr->vocab, n_vocab,
                promptCtx.tokens.data() + promptCtx.n_ctx - promptCtx.n_ctx,
                promptCtx.n_ctx,
                promptCtx.logits,
                promptCtx.top_k, promptCtx.top_p, promptCtx.temp,
                promptCtx.repeat_penalty,
                d_ptr->rng);

            t_sample_us += ggml_time_us() - t_start_sample_us;
        }

        // Check if the context has run out...
        if (promptCtx.n_past + 1 > promptCtx.n_ctx) {
            const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase;
            // Erase the first percentage of context from the tokens...
            std::cerr << "MPT: reached the end of the context window so resizing\n";
            promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint);
            promptCtx.n_past = promptCtx.tokens.size();
            recalculateContext(promptCtx, recalculateCallback);
            assert(promptCtx.n_past + 1 <= promptCtx.n_ctx);
        }

        const int64_t t_start_predict_us = ggml_time_us();
        if (!mpt_eval(*d_ptr->model, d_ptr->n_threads, promptCtx.n_past, { id }, promptCtx.logits,
            d_ptr->mem_per_token)) {
            std::cerr << "MPT ERROR: Failed to predict next token\n";
            return;
        }
        t_predict_us += ggml_time_us() - t_start_predict_us;

        promptCtx.n_past += 1;
        // display text
        ++totalPredictions;

        // mpt-7b-chat has a special token for end of generation
        if (d_ptr->has_im_end && id == d_ptr->vocab.token_to_id["<|im_end|>"])
            goto stop_generating;

        if (id == 0 /*end of text*/)
            goto stop_generating;

        const std::string str = d_ptr->vocab.id_to_token[id];

        // Check if the provided str is part of our reverse prompts
        bool foundPartialReversePrompt = false;
        const std::string completed = cachedResponse + str;
        if (reversePrompts.find(completed) != reversePrompts.end()) {
            goto stop_generating;
        }

        // Check if it partially matches our reverse prompts and if so, cache
        for (auto s : reversePrompts) {
            if (s.compare(0, completed.size(), completed) == 0) {
                foundPartialReversePrompt = true;
                cachedResponse = completed;
                break;
            }
        }

        // Regardless the token gets added to our cache
        cachedTokens.push_back(id);

        // Continue if we have found a partial match
        if (foundPartialReversePrompt)
            continue;

        // Empty the cache
        for (auto t : cachedTokens) {
            if (promptCtx.tokens.size() == promptCtx.n_ctx)
                promptCtx.tokens.erase(promptCtx.tokens.begin());
            promptCtx.tokens.push_back(t);
            if (!responseCallback(t, d_ptr->vocab.id_to_token[t]))
                goto stop_generating;
        }
        cachedTokens.clear();
    }

stop_generating:

#if 0
    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        std::cout << "MPT INFO: mem per token = " << mem_per_token << " bytes\n";
        std::cout << "MPT INFO: sample time = " << t_sample_us/1000.0f << " ms\n";
        std::cout << "MPT INFO: prompt time = " << t_prompt_us/1000.0f << " ms\n";
        std::cout << "MPT INFO: predict time = " << t_predict_us/1000.0f << " ms / " << t_predict_us/1000.0f/totalPredictions << " ms per token\n";
        std::cout << "MPT INFO: total time = " << (t_main_end_us - t_main_start_us)/1000.0f << " ms\n";
        fflush(stdout);
    }
#endif

    return;
}

void MPT::recalculateContext(PromptContext &promptCtx, std::function<bool(bool)> recalculate)
{
    size_t i = 0;
    promptCtx.n_past = 0;
    while (i < promptCtx.tokens.size()) {
        size_t batch_end = std::min(i + promptCtx.n_batch, promptCtx.tokens.size());
        std::vector<int> batch(promptCtx.tokens.begin() + i, promptCtx.tokens.begin() + batch_end);

        assert(promptCtx.n_past + batch.size() <= promptCtx.n_ctx);

        if (!mpt_eval(*d_ptr->model, d_ptr->n_threads, promptCtx.n_past, batch, promptCtx.logits,
            d_ptr->mem_per_token)) {
            std::cerr << "MPT ERROR: Failed to process prompt\n";
            goto stop_generating;
        }
        promptCtx.n_past += batch.size();
        if (!recalculate(true))
            goto stop_generating;
        i = batch_end;
    }
    assert(promptCtx.n_past == promptCtx.tokens.size());

stop_generating:
    recalculate(false);
}