Mirror of https://github.com/ravenscroftj/turbopilot.git

Merge pull request #67 from ravenscroftj/fix/mac-memory-usage
Temporary fix for stablecode and starcoder on mac

Commit eaeb52fcb0
@@ -84,20 +84,21 @@ bool gpt_neox_eval(
     const int n_vocab = hparams.n_vocab;
     const int n_rot = hparams.n_rot;

-    static size_t buf_size = 256u*1024*1024;
+    static size_t buf_size = 512u*1024*1024;
     static void * buf = malloc(buf_size);

     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = 256u*1024*1024;
+    static size_t scr0_size = 512*1024*1024;
     static void * scr0 = malloc(scr0_size);

-    static size_t scr1_size = 256u*1024*1024;
+    static size_t scr1_size = 512*1024*1024;
     static void * scr1 = malloc(scr1_size);

-    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
+    if (mem_per_token > 0 && (mem_per_token*N) > buf_size) {
         const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
-        //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
+        spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);

         // reallocate
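For context, a minimal standalone sketch of the grow-on-demand buffer pattern this hunk retunes (doubling the starting size to 512 MiB). The names eval_buf, eval_buf_size and grow_eval_buffer are illustrative, not identifiers from the repository:

// Sketch only: grow-on-demand eval buffer with ~10% headroom, as in the hunk above.
#include <cstdio>
#include <cstdlib>

static size_t eval_buf_size = 512u*1024*1024;          // start at 512 MiB, as in the fix
static void * eval_buf      = std::malloc(eval_buf_size);

// Returns false if the buffer cannot be grown to hold n_tokens.
static bool grow_eval_buffer(size_t mem_per_token, size_t n_tokens) {
    if (mem_per_token > 0 && mem_per_token*n_tokens > eval_buf_size) {
        // add ~10% to account for ggml object overhead, mirroring the original code
        const size_t new_size = (size_t)(1.1*(mem_per_token*n_tokens));
        void * p = std::realloc(eval_buf, new_size);
        if (p == nullptr) {
            std::fprintf(stderr, "failed to allocate %zu bytes\n", new_size);
            return false;
        }
        eval_buf      = p;
        eval_buf_size = new_size;
    }
    return true;
}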
@@ -107,8 +108,6 @@ bool gpt_neox_eval(
             fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
             return false;
         }
-
-        spdlog::debug("{}: reallocating context buffer {} -> now {} bytes of tokens in prompt = {}", __func__, buf_size, buf_size_new);
     }

     struct ggml_init_params params = {
@@ -303,12 +302,14 @@ bool gpt_neox_eval(
     embd_w.resize(n_vocab);
     memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);

-
-    spdlog::debug("used_mem = {}\n", ggml_used_mem(ctx0));

     if (mem_per_token == 0) {
-        mem_per_token = ggml_used_mem(ctx0)/N;
+        mem_per_token = ggml_used_mem(ctx0) / N; //* 4;
+        spdlog::debug("Set mem_per_token={} / {} * {} = {}", ggml_used_mem(ctx0), N, 4, mem_per_token);
+
     }
+    spdlog::debug("used_mem = {}\n", ggml_used_mem(ctx0));
     //printf("used_mem = %zu\n", ggml_used_mem(ctx0));

     ggml_free(ctx0);

@@ -685,7 +686,13 @@ std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length,

     std::vector<float> logits;

-    gpt_neox_eval((*model), config.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
+    std::vector<gpt_vocab::id> test = {};
+
+    for(int i=0;i<64;i++){
+        test.push_back(i);
+    }
+
+    gpt_neox_eval((*model), config.n_threads, 0, test, logits, mem_per_token);

     const int64_t t_start_us = ggml_time_us();

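The warm-up change above replaces the old { 0, 1, 2, 3 } probe with a 64-token dummy batch, so that mem_per_token, derived from ggml_used_mem(ctx0)/N, reflects a more realistic sequence length. A hedged, self-contained sketch of that arithmetic; all numbers are made up for illustration and nothing here is repository code:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    // Build the same kind of dummy batch the commit feeds to gpt_neox_eval.
    std::vector<int32_t> warmup(64);
    for (int i = 0; i < 64; i++) {
        warmup[i] = i;
    }

    // Hypothetical figures: memory ggml reports after the warm-up, and a prompt size.
    const size_t used_mem   = 256u*1024*1024;
    const size_t prompt_len = 900;

    // As in the diff: mem_per_token = ggml_used_mem(ctx0)/N
    const size_t mem_per_token = used_mem / warmup.size();
    std::printf("mem_per_token=%zu, estimated prompt memory=%zu bytes\n",
                mem_per_token, mem_per_token*prompt_len);
    return 0;
}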
@@ -37,7 +37,7 @@ bool starcoder_eval(
     const int n_head = hparams.n_head;
     const int n_vocab = hparams.n_vocab;

-    static size_t buf_size = 256u*1024*1024;
+    static size_t buf_size = 512u*1024*1024;
     static void * buf = malloc(buf_size);

     // use 2 scratch buffers
@@ -48,8 +48,10 @@ bool starcoder_eval(
     static size_t scr1_size = 512u*1024*1024;
     static void * scr1 = malloc(scr1_size);

-    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
-        const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
+    if (mem_per_token > 0 && 2*mem_per_token*N > buf_size) {
+        const size_t buf_size_new = 2*(mem_per_token*N); // add 10% to account for ggml object overhead
+
+        if(buf_size_new > buf_size){
         spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);

         // reallocate
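The starcoder variant above is slightly different: it triggers reallocation once the estimate exceeds half the current buffer (2*mem_per_token*N > buf_size), sizes the new buffer at twice the estimate, and only ever grows the buffer. A rough sketch of that logic under those assumptions; the names mirror the hunk for readability but this is not repository code:

#include <cstdlib>

static size_t buf_size = 512u*1024*1024;
static void * buf      = std::malloc(buf_size);

static void maybe_grow(size_t mem_per_token, size_t N) {
    // Grow when the estimated need reaches half the buffer, leaving 2x headroom.
    if (mem_per_token > 0 && 2*mem_per_token*N > buf_size) {
        const size_t buf_size_new = 2*(mem_per_token*N);
        if (buf_size_new > buf_size) {          // never shrink the static buffer
            void * p = std::realloc(buf, buf_size_new);
            if (p != nullptr) {
                buf      = p;
                buf_size = buf_size_new;
            }
        }
    }
}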
@@ -61,12 +63,15 @@ bool starcoder_eval(
         }
     }
+
+    }

     struct ggml_init_params params = {
         /*.mem_size =*/ buf_size,
         /*.mem_buffer =*/ buf,
         /*.no_alloc =*/ false,
     };

+
     struct ggml_context * ctx0 = ggml_init(params);
     struct ggml_cgraph gf = {};

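The context lines above show how the (re)sized buffer is handed to ggml: ggml_init receives a ggml_init_params whose mem_size/mem_buffer point at the static buffer, so every tensor allocated during the eval comes out of it. A hedged sketch of that usage, assuming the ggml.h header vendored by the project; the wrapper function and its error handling are invented for illustration:

// Sketch only: the eval buffer backs a ggml context. Field names and the
// ggml_init/ggml_free calls come from the diff's context lines.
#include "ggml.h"   // assumed: the ggml header already vendored by turbopilot

bool eval_with_static_buffer(size_t buf_size, void * buf) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ buf_size,  // all graph/tensor allocations come from buf
        /*.mem_buffer =*/ buf,       // so eval memory use is bounded by buf_size
        /*.no_alloc   =*/ false,
    };

    struct ggml_context * ctx0 = ggml_init(params);
    if (ctx0 == nullptr) {
        return false;                // buffer too small or ggml failed to initialise
    }

    // ... build the graph and run the forward pass here ...

    ggml_free(ctx0);                 // releases the context; buf itself stays allocated
    return true;
}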
@@ -338,7 +343,9 @@ bool starcoder_eval(
     if (mem_per_token == 0) {
         mem_per_token = ggml_used_mem(ctx0)/N;
     }
     //printf("used_mem = %zu MB\n", ggml_used_mem(ctx0)/(1024*1024));
+
+    spdlog::debug("{}: used mem buf={} bytes", __func__, ggml_used_mem(ctx0));

     ggml_free(ctx0);

@@ -743,11 +750,22 @@ std::stringstream StarcoderModel::predict_impl(std::string prompt, int max_lengt
     size_t mem_per_token = 0;

     std::vector<float> logits;
+    std::vector<gpt_vocab::id> test = {};
+
+    for(int i=0;i<64;i++){
+        test.push_back(i);
+    }
+
+    spdlog::debug("{}: calculate required memory per token", __func__);
+    starcoder_eval((*model), config.n_threads, 0, test, logits, mem_per_token);
+    spdlog::debug("{}: mem_per_token={}", __func__, mem_per_token);
+    spdlog::debug("{}: total mem needed for prompt = {}*{}={}", __func__, embd_inp.size(), mem_per_token, embd_inp.size()*mem_per_token);

-    starcoder_eval((*model), config.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);

     for (int i = embd.size(); i < embd_inp.size() + n_predict; i++) {
         // predict
+        spdlog::debug("{}: process token #{}: ", __func__, i);
+
         if (embd.size() > 0) {
             const int64_t t_start_us = ggml_time_us();
