llamamodel: always print special tokens (#2701)
Signed-off-by: Jared Van Bortel <jared@nomic.ai>
commit 2a7fe95ff4 (parent 4ca1d0411f)
@@ -542,10 +542,10 @@ std::vector<LLModel::Token> LLamaModel::tokenize(PromptContext &ctx, const std::
 std::string LLamaModel::tokenToString(Token id) const
 {
     std::vector<char> result(8, 0);
-    const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, false);
+    const int n_tokens = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, false);
+        int check = llama_token_to_piece(d_ptr->model, id, result.data(), result.size(), 0, true);
         GGML_ASSERT(check == -n_tokens);
     }
     else {
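
The change flips the final `special` argument of llama_token_to_piece from false to true, so special tokens (e.g. BOS/EOS and chat-template markers) are rendered as their text instead of an empty piece. A minimal sketch of the same resize-on-negative pattern, assuming llama.cpp's C API at the time of this commit; the standalone helper name is hypothetical and model loading is elided:

    // tokenToPiece: convert one token id to its text.
    // llama_token_to_piece returns the piece length, or a negative
    // value whose magnitude is the required buffer size.
    #include <string>
    #include <vector>
    #include "llama.h"

    std::string tokenToPiece(const llama_model *model, llama_token id, bool special)
    {
        std::vector<char> buf(8, 0);
        int n = llama_token_to_piece(model, id, buf.data(), buf.size(), 0, special);
        if (n < 0) {
            // Buffer too small: -n is the size actually needed.
            buf.resize(-n);
            n = llama_token_to_piece(model, id, buf.data(), buf.size(), 0, special);
        }
        return std::string(buf.data(), n);
    }

With special=false an EOS token comes back as an empty string; with special=true its textual form (for example "</s>") is returned, which is what "always print special tokens" refers to.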