Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-10-01 01:26:03 -04:00)
Read more GGUF metadata (scale_linear and freq_base) (#3877)
Parent: 90fca6a77d
Commit: 460c40d8ab
@@ -71,6 +71,9 @@ def load_metadata(fname):
     ti_data_count = struct.unpack("<Q", file.read(8))[0]
     kv_data_count = struct.unpack("<Q", file.read(8))[0]
 
+    if GGUF_VERSION == 1:
+        raise Exception('You are using an outdated GGUF, please download a new one.')
+
     for i in range(kv_data_count):
         key_length = struct.unpack("<Q", file.read(8))[0]
         key = file.read(key_length)
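For context, load_metadata() is walking the GGUF header: a 4-byte magic, a uint32 version, then two little-endian uint64 counts (tensor infos and metadata key/value pairs) before the key/value loop shown above. The 8-byte "<Q" counts only exist from GGUF v2 onward, which is why version 1 files are rejected here. The standalone sketch below is illustrative, not code from the repository; the function name and file path are hypothetical.

import struct

def read_gguf_header(fname):
    # Reads only the fixed-size GGUF header fields (v2+ layout assumed).
    with open(fname, 'rb') as file:
        magic = file.read(4)                                  # expected to be b'GGUF'
        version = struct.unpack("<I", file.read(4))[0]        # uint32 format version
        ti_data_count = struct.unpack("<Q", file.read(8))[0]  # uint64 tensor-info count
        kv_data_count = struct.unpack("<Q", file.read(8))[0]  # uint64 metadata key/value count
        return magic, version, ti_data_count, kv_data_count

# Example usage (hypothetical path):
# print(read_gguf_header('models/llama-2-7b.Q4_K_M.gguf'))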
@@ -17,6 +17,7 @@ def get_fallback_settings():
         'truncation_length': shared.settings['truncation_length'],
         'n_ctx': 2048,
+        'rope_freq_base': 0,
         'compress_pos_emb': 1,
     }
 
 
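The new 'rope_freq_base': 0 entry only establishes a fallback default (0 appears to act as "no explicit override" so the loader's own default applies); the real value, when available, comes from the GGUF metadata in the next hunk. A toy illustration of that precedence, with made-up numbers rather than anything read from an actual model:

defaults = {'n_ctx': 2048, 'rope_freq_base': 0, 'compress_pos_emb': 1}
from_gguf = {'n_ctx': 4096, 'rope_freq_base': 1000000.0}  # hypothetical metadata-derived values
settings = {**defaults, **from_gguf}
# settings == {'n_ctx': 4096, 'rope_freq_base': 1000000.0, 'compress_pos_emb': 1}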
@@ -48,6 +49,10 @@ def get_model_metadata(model):
         metadata = metadata_gguf.load_metadata(model_file)
         if 'llama.context_length' in metadata:
             model_settings['n_ctx'] = metadata['llama.context_length']
+        if 'llama.rope.scale_linear' in metadata:
+            model_settings['compress_pos_emb'] = metadata['llama.rope.scale_linear']
+        if 'llama.rope.freq_base' in metadata:
+            model_settings['rope_freq_base'] = metadata['llama.rope.freq_base']
 
     return model_settings
 
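Taken together, the three if blocks implement a fixed mapping from GGUF metadata keys to webui settings: llama.context_length → n_ctx, llama.rope.scale_linear → compress_pos_emb, and llama.rope.freq_base → rope_freq_base. A data-driven restatement of the same logic is sketched below purely for illustration; the repository keeps the explicit if chain, and the table and function names here are invented.

GGUF_TO_SETTINGS = {
    'llama.context_length': 'n_ctx',
    'llama.rope.scale_linear': 'compress_pos_emb',
    'llama.rope.freq_base': 'rope_freq_base',
}

def apply_gguf_metadata(model_settings, metadata):
    # Copy each recognized GGUF key into the corresponding webui setting.
    for gguf_key, setting in GGUF_TO_SETTINGS.items():
        if gguf_key in metadata:
            model_settings[setting] = metadata[gguf_key]
    return model_settings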