llamamodel: always print special tokens (#2701)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
Author: Jared Van Bortel <jared@nomic.ai>
Date: 2024-07-22 13:32:17 -04:00 (committed by GitHub)
parent 4ca1d0411f
commit 2a7fe95ff4

@@ -542,10 +542,10 @@ std::vector<LLModel::Token> LLamaModel::tokenize(PromptContext &ctx, const std::
 std::string LLamaModel::tokenToString(Token id) const
 {
     std::vector<char> result(8, 0);
-    const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, false);
+    const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, false);
+        int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);
         GGML_ASSERT(check == -n_tokens);
     }
     else {
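
For context, a minimal standalone sketch (not part of this commit) of what the flipped `special` flag does, assuming a llama.cpp build contemporary with this change (the six-argument `llama_token_to_piece()` used above): with `special=false`, a control token such as BOS renders as an empty piece and effectively vanishes from output, while `special=true` renders its textual form, e.g. "<s>" or "<|begin_of_text|>". The `piece()` helper below is hypothetical and mirrors the negative-return retry logic in `tokenToString()`.

// Hypothetical demo: render the model's BOS token with special=false
// and special=true to show the difference the flag makes.
#include <llama.h>

#include <cstdio>
#include <string>

// Mirrors tokenToString() above: a negative return value from
// llama_token_to_piece() is the required buffer size, negated.
static std::string piece(const llama_model *model, llama_token id, bool special) {
    std::string buf(8, '\0');
    int n = llama_token_to_piece(model, id, buf.data(), buf.size(), 0, special);
    if (n < 0) {
        buf.resize(-n);
        n = llama_token_to_piece(model, id, buf.data(), buf.size(), 0, special);
    }
    buf.resize(n > 0 ? n : 0);
    return buf;
}

int main(int argc, char **argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }
    llama_backend_init();
    llama_model *model = llama_load_model_from_file(argv[1], llama_model_default_params());
    if (!model)
        return 1;

    llama_token bos = llama_token_bos(model);
    // special=false: control tokens render as an empty string.
    // special=true:  control tokens render as their text form.
    std::printf("special=false: \"%s\"\n", piece(model, bos, false).c_str());
    std::printf("special=true:  \"%s\"\n", piece(model, bos, true).c_str());

    llama_free_model(model);
    llama_backend_free();
    return 0;
}

The commit applies the same idea inside tokenToString(), so tokens decoded through it now include special tokens verbatim rather than dropping them.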