text-generation-webui/modules/llamacpp_model.py

import llamacpp
from modules.callbacks import Iteratorize


class LlamaCppTokenizer:
    """A thin wrapper over the llamacpp tokenizer"""

    def __init__(self, model: llamacpp.LlamaInference):
        self._tokenizer = model.get_tokenizer()
        # Special token ids are hard-coded rather than queried from the model
        self.eos_token_id = 2
        self.bos_token_id = 0

    @classmethod
    def from_model(cls, model: llamacpp.LlamaInference):
        return cls(model)

    def encode(self, prompt: str):
        return self._tokenizer.tokenize(prompt)

    def decode(self, ids):
        return self._tokenizer.detokenize(ids)


class LlamaCppModel:
    def __init__(self):
        self.initialized = False

    @classmethod
    def from_pretrained(cls, path):
        params = llamacpp.InferenceParams()
        params.path_model = str(path)

        _model = llamacpp.LlamaInference(params)

        result = cls()
        result.model = _model
        result.params = params

        tokenizer = LlamaCppTokenizer.from_model(_model)
        return result, tokenizer

    def generate(self, context="", token_count=20, temperature=1, top_p=1, top_k=50, repetition_penalty=1, callback=None):
        params = self.params
        params.n_predict = token_count
        params.top_p = top_p
        params.top_k = top_k
        params.temp = temperature
        params.repeat_penalty = repetition_penalty
        # params.repeat_last_n = repeat_last_n

        # self.model.params = params
        self.model.add_bos()
        self.model.update_input(context)

        output = ""
        is_end_of_text = False
        ctr = 0
        while ctr < token_count and not is_end_of_text:
            if self.model.has_unconsumed_input():
                # The prompt has not been fully ingested yet
                self.model.ingest_all_pending_input()
            else:
                # Evaluate the model, sample one token, and append its text to the output
                self.model.eval()
                token = self.model.sample()
                text = self.model.token_to_str(token)
                output += text
                is_end_of_text = token == self.model.token_eos()
                if callback:
                    callback(text)
                ctr += 1

        return output

    def generate_with_streaming(self, **kwargs):
        # Iteratorize wraps the callback-based generate() call as a generator;
        # each iteration yields the reply accumulated so far
        with Iteratorize(self.generate, kwargs, callback=None) as generator:
            reply = ''
            for token in generator:
                reply += token
                yield reply
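

if __name__ == '__main__':
    # Minimal usage sketch, not part of the webui's own loading flow. The model path
    # below is an illustrative assumption; from_pretrained() and
    # generate_with_streaming() are the methods defined above.
    model, tokenizer = LlamaCppModel.from_pretrained('models/ggml-model-q4_0.bin')
    for reply in model.generate_with_streaming(context='Hello, my name is', token_count=16):
        print(reply)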