Mirror of https://github.com/nomic-ai/gpt4all.git
llamamodel: fix BERT tokenization after llama.cpp update (#2381)

Signed-off-by: Jared Van Bortel <jared@nomic.ai>

commit f1b4092ca6 (parent 0b63ad5eff)
@@ -920,11 +920,11 @@ void LLamaModel::embedInternal(
         int32_t n_tokens = llama_tokenize(d_ptr->model, text.c_str(), text.length(), tokens.data(), tokens.size(), wantBOS, false);
         if (n_tokens) {
             (void)eos_token;
-            assert(useEOS == (eos_token != -1 && tokens[n_tokens - 1] == eos_token));
-            tokens.resize(n_tokens - useEOS); // erase EOS/SEP
-        } else {
-            tokens.clear();
+            assert((useEOS && wantBOS) == (eos_token != -1 && tokens[n_tokens - 1] == eos_token));
+            if (useEOS && wantBOS)
+                n_tokens--; // erase EOS/SEP
         }
+        tokens.resize(n_tokens);
     };
 
     // tokenize the texts
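As the updated assert implies, after the llama.cpp update the trailing EOS/SEP token only appears when the model uses one (useEOS) and the tokenizer was asked to add special tokens (wantBOS), so it must be stripped only when both hold. The following is a minimal standalone sketch of just that trimming step, using a hypothetical trimTrailingSep helper and a stand-in [SEP] id instead of real llama_tokenize output:

    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Hypothetical helper mirroring the fixed logic: strip the trailing
    // EOS/SEP token only when both useEOS and wantBOS hold, then shrink the
    // buffer to the real token count. In the actual code this runs on the
    // output of llama_tokenize inside LLamaModel::embedInternal.
    static void trimTrailingSep(std::vector<int32_t> &tokens, int32_t n_tokens,
                                int32_t eos_token, bool useEOS, bool wantBOS)
    {
        if (n_tokens) {
            (void)eos_token; // silence unused warning when asserts compile out
            // The last token is EOS/SEP exactly when the caller asked for it
            // and the tokenizer was told to add special tokens.
            assert((useEOS && wantBOS) == (eos_token != -1 && tokens[n_tokens - 1] == eos_token));
            if (useEOS && wantBOS)
                n_tokens--; // erase EOS/SEP
        }
        tokens.resize(n_tokens);
    }

    int main()
    {
        const int32_t sep = 102; // stand-in BERT [SEP] id for this sketch
        std::vector<int32_t> tokens {101, 2023, 2003, 1037, 3231, 102};

        trimTrailingSep(tokens, int32_t(tokens.size()), sep,
                        /*useEOS=*/true, /*wantBOS=*/true);
        std::cout << "tokens kept: " << tokens.size() << '\n'; // prints 5

        return 0;
    }

Moving tokens.resize(n_tokens) outside the branch also covers the n_tokens == 0 case, which is presumably why the separate else { tokens.clear(); } branch could be dropped.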
@@ -938,7 +938,7 @@ void Database::start()
     connect(m_embLLM, &EmbeddingLLM::errorGenerated, this, &Database::handleErrorGenerated);
     m_scanTimer->callOnTimeout(this, &Database::scanQueue);
     if (!QSqlDatabase::drivers().contains("QSQLITE")) {
-        qWarning() << "ERROR: missing sqllite driver";
+        qWarning() << "ERROR: missing sqlite driver";
     } else {
         QSqlError err = initDb();
         if (err.type() != QSqlError::NoError)
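The corrected warning fires when the Qt SQLite plugin is unavailable. Below is a minimal standalone sketch of the same QSQLITE availability check, assuming a hypothetical command-line program and database path rather than the chat application's Database class:

    #include <QCoreApplication>
    #include <QSqlDatabase>
    #include <QSqlError>
    #include <QtDebug>

    int main(int argc, char *argv[])
    {
        QCoreApplication app(argc, argv);

        // Same check as Database::start(): bail out early if the Qt SQLite
        // driver plugin was not built or deployed with the application.
        if (!QSqlDatabase::drivers().contains("QSQLITE")) {
            qWarning() << "ERROR: missing sqlite driver";
            return 1;
        }

        QSqlDatabase db = QSqlDatabase::addDatabase("QSQLITE");
        db.setDatabaseName("example.db"); // hypothetical path for this sketch
        if (!db.open()) {
            qWarning() << "ERROR: failed to open database:" << db.lastError().text();
            return 1;
        }
        qDebug() << "QSQLITE driver found; database opened";
        return 0;
    }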
@@ -229,7 +229,7 @@ Raw Data:
   - Explorer: https://atlas.nomic.ai/map/gpt4all_data_clean
 - [GPT4All-J Dataset](https://huggingface.co/datasets/nomic-ai/gpt4all-j-prompt-generations)
   - Explorer Indexed on Prompts: https://atlas.nomic.ai/map/gpt4all-j-prompts-curated
-  - Exporer Indexed on Responses: https://atlas.nomic.ai/map/gpt4all-j-response-curated
+  - Explorer Indexed on Responses: https://atlas.nomic.ai/map/gpt4all-j-response-curated
 
 We are not distributing a LLaMa 7B checkpoint.
 