llamamodel: fix embedding crash for >512 tokens after #2310 (#2383)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Author: Jared Van Bortel
Date: 2024-05-29 10:51:00 -04:00 (committed by GitHub)
parent f047f383d0
commit e94177ee9a

@@ -387,6 +387,7 @@ bool LLamaModel::loadModel(const std::string &modelPath, int n_ctx, int ngl)
     const int n_ctx_train = llama_n_ctx_train(d_ptr->model);
     if (isEmbedding) {
         d_ptr->ctx_params.n_batch = n_ctx;
+        d_ptr->ctx_params.n_ubatch = n_ctx;
     } else {
         if (n_ctx > n_ctx_train) {
             std::cerr << "warning: model was trained on only " << n_ctx_train << " context tokens ("
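
Background on why this one-line change matters, as a sketch rather than GPT4All's actual code: in llama.cpp, n_batch is the logical batch size while n_ubatch is the physical micro-batch size, which defaults to 512. Non-causal embedding models evaluate the whole prompt in a single micro-batch, so a prompt longer than 512 tokens fails llama_decode's batch-size check unless n_ubatch is raised alongside n_batch, which is exactly what the hunk above does. The standalone repro below assumes the llama.cpp C API of this period (llama_load_model_from_file, llama_new_context_with_model); the model filename is hypothetical.

// Minimal sketch, assuming the llama.cpp C API circa this commit.
// The model filename is hypothetical.
#include "llama.h"

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    llama_model *model = llama_load_model_from_file(
        "embedding-model.Q4_0.gguf", mparams); // hypothetical file

    const int n_ctx = 2048;
    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx      = n_ctx;
    cparams.n_batch    = n_ctx; // logical batch: max tokens per llama_decode call
    cparams.n_ubatch   = n_ctx; // physical micro-batch, default 512; a non-causal
                                // embedding model processes the whole prompt in
                                // one micro-batch, so this must cover n_ctx too
    cparams.embeddings = true;  // extract embeddings instead of logits

    llama_context *ctx = llama_new_context_with_model(model, cparams);

    // ... tokenize, llama_decode(), read llama_get_embeddings_seq(ctx, 0) ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}

With n_ubatch left at its default, the setup above aborts inside llama_decode for any prompt over 512 tokens; setting it to n_ctx lets the full prompt go through in one pass.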