# text-generation-webui/modules/logits.py

import torch
from transformers import is_torch_xpu_available
from modules import sampler_hijack, shared
from modules.logging_colors import logger
from modules.text_generation import generate_reply
global_scores = None
def get_next_logits(prompt, state, use_samplers, previous, return_dict=False):
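    """
    Return the 50 most likely next tokens for `prompt` along with their probabilities.

    When `use_samplers` is True, a single token is generated through generate_reply() so
    that the probabilities reflect the active sampling settings (captured via
    sampler_hijack); otherwise the raw model logits are used. Returns a dict mapping
    token -> probability string when `return_dict` is True, or a formatted text block
    together with `previous` otherwise.
    """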
    if shared.model is None:
        logger.error("No model is loaded! Select one in the Model tab.")
        return 'Error: No model is loaded! Select one in the Model tab.', previous

    is_non_hf_exllamav2 = shared.model.__class__.__name__ == 'Exllamav2Model'
    is_non_hf_exllamav1 = shared.model.__class__.__name__ == 'ExllamaModel'
    is_non_hf_llamacpp = shared.model.__class__.__name__ == 'LlamaCppModel'
    if use_samplers:
        if any([is_non_hf_exllamav2, is_non_hf_exllamav1, is_non_hf_llamacpp]):
            logger.error("Sampler hijacking is not supported for non-Huggingface loaders.")
            # Sampling is all done in C for exllama, so it is really hard to hijack.
            # It should be possible to hijack the llama.cpp sampler by hijacking all of its
            # sampling methods, but that is not implemented yet.
            return 'Error: Sampler hijacking is not supported for non-Huggingface loaders. Please disable the "Use samplers" option.', previous

        state['max_new_tokens'] = 1
        state['auto_max_new_tokens'] = False
        for _ in generate_reply(prompt, state):
            pass

        scores = sampler_hijack.global_scores[-1]
    else:
        if is_non_hf_exllamav2 or is_non_hf_exllamav1:
            if is_torch_xpu_available():
                tokens = shared.tokenizer.encode(prompt).to("xpu:0")
            else:
                tokens = shared.tokenizer.encode(prompt).cuda()
            scores = shared.model.get_logits(tokens)[-1][-1]
        elif is_non_hf_llamacpp:
            tokens = shared.tokenizer.encode(prompt)
            scores = shared.model.get_logits(tokens)[-1][-1]
        else:
            if is_torch_xpu_available():
                tokens = shared.tokenizer.encode(prompt, return_tensors='pt').to("xpu:0")
            else:
                tokens = shared.tokenizer.encode(prompt, return_tensors='pt').cuda()
            output = shared.model(input_ids=tokens)
            scores = output['logits'][-1][-1]
    # Convert the last-position scores to probabilities and keep the 50 most likely tokens.
    probs = torch.softmax(scores, dim=-1, dtype=torch.float)
    topk_values, topk_indices = torch.topk(probs, k=50, largest=True, sorted=True)
    topk_values = [f"{float(i):.5f}" for i in topk_values]
    if is_non_hf_exllamav1 or is_non_hf_llamacpp:
        topk_indices = [i.expand((1, 1)) for i in topk_indices]

    if hasattr(shared.tokenizer, 'convert_ids_to_tokens'):
        tokens = [shared.tokenizer.convert_ids_to_tokens(int(i)) for i in topk_indices]
    else:
        tokens = [shared.tokenizer.decode(i) for i in topk_indices]
    if return_dict:
        output = {}
        for row in list(zip(topk_values, tokens)):
            output[row[1]] = row[0]

        return output
    else:
        output = ''
        for row in list(zip(topk_values, tokens)):
            output += f"{row[0]} - {repr(row[1])}\n"

        return output, previous
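
# Example usage (a minimal sketch, not part of the module): this assumes a model and
# tokenizer have already been loaded into `shared.model` / `shared.tokenizer` (e.g. via
# the Model tab or `modules.models.load_model`), and that `state` is a generation-settings
# dict like the one the web UI passes to generate_reply().
#
#     from modules.logits import get_next_logits
#
#     text, _ = get_next_logits("The capital of France is", state, use_samplers=False, previous='')
#     print(text)  # one "probability - 'token'" line per top-50 candidate
#
#     as_dict = get_next_logits("The capital of France is", state, use_samplers=True,
#                               previous='', return_dict=True)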