"""
|
|
Helpers to support streaming generate output.
|
|
Borrowed from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/callbacks.py
|
|
"""
|
|
|
|
import gc
|
|
import traceback
|
|
from queue import Queue
|
|
from threading import Thread
|
|
|
|
import torch
|
|
import transformers
|
|
|
|
|
|
class Stream(transformers.StoppingCriteria):
    """Stopping criterion that never stops generation; it exists only for
    its side effect of passing the tokens generated so far to a callback."""

    def __init__(self, callback_func=None):
        self.callback_func = callback_func

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        if self.callback_func is not None:
            # input_ids is (batch, seq_len); stream the first sequence.
            self.callback_func(input_ids[0])
        return False
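
# A minimal sketch (not part of the original module) of how Stream hooks into
# Hugging Face generation: the criterion is added to `stopping_criteria`
# purely so the callback fires after every generated token. The `model`
# argument and any extra generate kwargs are assumptions supplied by the
# caller.
def _example_generate_with_callback(model, callback=None, **generate_kwargs):
    generate_kwargs["stopping_criteria"] = transformers.StoppingCriteriaList(
        [Stream(callback_func=callback)]
    )
    with torch.no_grad():
        model.generate(**generate_kwargs)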

class Iteratorize:
    """
    Transforms a function that takes a callback
    into a lazy iterator (generator).
    """

    def __init__(self, func, kwargs=None, callback=None):
        self.mfunc = func
        self.c_callback = callback
        self.q = Queue()
        self.sentinel = object()
        self.kwargs = kwargs if kwargs is not None else {}
        self.stop_now = False

        def _callback(val):
            # Raising aborts the wrapped function from inside its callback
            # once the consumer has asked us to stop.
            if self.stop_now:
                raise ValueError
            self.q.put(val)

        def gentask():
            ret = None  # stays None if the wrapped function raises
            try:
                ret = self.mfunc(callback=_callback, **self.kwargs)
            except ValueError:
                # Expected when _callback aborts on stop_now.
                pass
            except Exception:
                traceback.print_exc()

            # Tell the consumer that iteration is finished.
            self.q.put(self.sentinel)
            if self.c_callback:
                self.c_callback(ret)

        self.thread = Thread(target=gentask)
        self.thread.start()

    def __iter__(self):
        return self

    def __next__(self):
        obj = self.q.get(True, None)  # block until the producer yields a value
        if obj is self.sentinel:
            raise StopIteration
        else:
            return obj

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Ask the producer thread to stop at its next callback invocation.
        self.stop_now = True
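
# A minimal usage sketch (hypothetical, not part of the original module):
# Iteratorize turns the callback-style function above into a lazy iterator,
# so a caller can stream decoded text as tokens arrive. `model`, `tokenizer`,
# and `input_ids` are assumptions supplied by the caller.
def _example_stream_decode(model, tokenizer, input_ids):
    with Iteratorize(
        _example_generate_with_callback,
        kwargs=dict(model=model, input_ids=input_ids),
    ) as generator:
        for output_ids in generator:
            # Each item is the full token sequence generated so far.
            yield tokenizer.decode(output_ids)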