llama.cpp: prevent prompt evaluation progress bar with just 1 step

oobabooga 2024-09-03 17:37:06 -07:00
parent 2cb8d4c96e
commit c5b40eb555

@@ -61,7 +61,7 @@ def eval_with_progress(self, tokens: Sequence[int]):
     assert self._batch.batch is not None
     self._ctx.kv_cache_seq_rm(-1, self.n_tokens, -1)
 
-    if len(tokens) > 1:
+    if len(tokens) > self.n_batch:
         progress_bar = tqdm(range(0, len(tokens), self.n_batch), desc="Prompt evaluation", leave=False)
     else:
         progress_bar = range(0, len(tokens), self.n_batch)
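
Why the new condition: range(0, len(tokens), self.n_batch) yields exactly one step whenever the prompt fits in a single batch (len(tokens) <= self.n_batch), so the old len(tokens) > 1 check could wrap a one-iteration loop in tqdm, producing a bar that jumps straight from 0% to 100%. A minimal standalone sketch of the new logic (the helper name make_progress_iter and the token/batch counts below are illustrative, not from the commit):

    from tqdm import tqdm

    def make_progress_iter(n_tokens: int, n_batch: int):
        # One loop step per batch of prompt tokens.
        steps = range(0, n_tokens, n_batch)
        if n_tokens > n_batch:
            # More than one batch is needed, so a progress bar is meaningful.
            return tqdm(steps, desc="Prompt evaluation", leave=False)
        # Prompt fits in a single batch: return the plain range, no bar.
        return steps

    # 300 tokens with n_batch=512 is a single step: the old `> 1` check
    # would still show a bar for it; the new check skips it.
    assert not isinstance(make_progress_iter(300, 512), tqdm)
    # 2048 tokens with n_batch=512 is four steps: the bar is shown.
    assert isinstance(make_progress_iter(2048, 512), tqdm)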