sampling: remove incorrect offset for n_vocab (#900)

No effect today, but avoids a *potential* bug later if we use
actualVocabSize - which is for when a model has a larger embedding
tensor / number of output logits than actually trained tokens, to allow
room for adding extras in finetuning. Presently all of our models have
had "placeholder" tokens in the vocab, so this hasn't broken anything,
but if the sizes did differ we want the equivalent of
`logits[:actualVocabSize]` (the start point is unchanged), not
`logits[-actualVocabSize:]` (what the old offset computed).
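
For illustration only, a minimal standalone sketch of the difference,
with hypothetical sizes and values (not taken from any actual model):

#include <cstdio>
#include <vector>

int main() {
    // Hypothetical example: the output tensor has room for 8 logits, but only
    // the first 6 correspond to trained tokens; the trailing 2 are padding
    // slots reserved for tokens added later in finetuning.
    std::vector<float> logits = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.0f, 0.0f};
    const int actualVocabSize = 6;
    const int n_logits = actualVocabSize;

    // What we want: trained-token logits start at the beginning of the buffer,
    // i.e. logits[:actualVocabSize].
    const float * plogits_front = logits.data();

    // The removed offset: the last actualVocabSize entries, i.e.
    // logits[-actualVocabSize:], which would skip the first trained tokens and
    // read into the padding slots whenever the sizes differ.
    const float * plogits_back = logits.data() + logits.size() - n_logits;

    std::printf("front: %.1f  back: %.1f\n", plogits_front[0], plogits_back[0]); // 0.1 vs 0.3
    return 0;
}
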
Aaron Miller 2023-06-08 11:08:10 -07:00 committed by GitHub
parent eb26293205
commit b14953e136

@@ -230,7 +230,7 @@ gpt_vocab::id gpt_sample_top_k_top_p(
     int n_logits = actualVocabSize;

     const auto last_n_tokens = std::vector<int32_t>(last_n_tokens_data, last_n_tokens_data + last_n_tokens_size);
-    const auto * plogits = logits.data() + logits.size() - n_logits;
+    const auto * plogits = logits.data();

     std::vector<std::pair<double, gpt_vocab::id>> logits_id;
     logits_id.reserve(n_logits);
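
For context, a self-contained sketch (not the project's actual code) of
the candidate-building step that typically follows in ggml-style
samplers, showing that only the first n_logits entries ever enter the
top-k/top-p pool once plogits points at the start of the buffer; the
helper name, temperature scaling, and loop shape are assumptions:

#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical helper mirroring the shape of ggml-style samplers: pair each
// trained-token logit with its token id, scaled by temperature, ready for
// later top-k/top-p filtering.
static std::vector<std::pair<double, int>>
build_candidates(const std::vector<float> & logits, int n_logits, double temp) {
    const float * plogits = logits.data(); // start of buffer, as in the fix above
    std::vector<std::pair<double, int>> logits_id;
    logits_id.reserve(n_logits);
    const double scale = 1.0 / temp;
    for (int i = 0; i < n_logits; i++) {
        logits_id.push_back(std::make_pair(plogits[i] * scale, i));
    }
    return logits_id; // padding entries beyond n_logits are never touched
}

int main() {
    std::vector<float> logits = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.0f, 0.0f};
    auto candidates = build_candidates(logits, /*n_logits=*/6, /*temp=*/0.9);
    std::printf("candidates: %zu\n", candidates.size()); // 6, not 8
    return 0;
}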