Mirror of https://github.com/nomic-ai/gpt4all.git
feat: commits for eval + generation
Commit e550e4ed34 (parent cd6a054a6c)
@@ -1,5 +0,0 @@
-# model/tokenizer
-model_name: # update with llama model name
-tokenizer_name: # update with llama model name
-lora: true
-lora_path: "nomic-ai/gpt4all-lora"
@@ -1,5 +1,5 @@
 # model/tokenizer
-model_name: # update with llama model name
-tokenizer_name: # update with llama model name
+model_name: "zpn/llama-7b"
+tokenizer_name: "zpn/llama-7b"
 lora: true
 lora_path: "tloen/alpaca-lora-7b"
@@ -1,4 +1,4 @@
 # model/tokenizer
-model_name: "nomic-ai/gpt4all-warmup-lr-epoch_1"
+model_name: "nomic-ai/gpt4all-warmup-lr-epoch_0"
 tokenizer_name: "EleutherAI/gpt-j-6b"
 lora: false
configs/eval/generate_gpt4all_llama_lora.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+# model/tokenizer
+model_name: "zpn/llama-7b"
+tokenizer_name: "zpn/llama-7b"
+lora: true
+lora_path: "nomic-ai/gpt4all-lora"
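The eval configs in this commit all share the same handful of keys. As a point of reference, here is a minimal sketch of how a config of this shape could be loaded, assuming the Hugging Face transformers and peft libraries; the repository's own eval script is not part of this diff, and the helper name below is hypothetical.

    import yaml
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    def load_model_from_config(path):
        # Parse the YAML config (model_name, tokenizer_name, lora, lora_path).
        with open(path) as f:
            cfg = yaml.safe_load(f)
        tokenizer = AutoTokenizer.from_pretrained(cfg["tokenizer_name"])
        model = AutoModelForCausalLM.from_pretrained(cfg["model_name"])
        if cfg.get("lora"):
            # lora: true -> apply the adapter weights on top of the base model.
            model = PeftModel.from_pretrained(model, cfg["lora_path"])
        return model, tokenizer

    model, tokenizer = load_model_from_config("configs/eval/generate_gpt4all_llama_lora.yaml")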
@@ -1,6 +1,6 @@
 # model/tokenizer
-model_name: # REPLACE HERE with the base llama model
-tokenizer_name: # REPLACE HERE with the llama tokenizer
+model_name: "zpn/llama-7b"
+tokenizer_name: "zpn/llama-7b"
 lora: true
 lora_path: "nomic-ai/gpt4all-lora"

configs/generate/generate_gptj.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
+# model/tokenizer
+model_name: "nomic-ai/gpt4all-warmup-lr-epoch_1"
+tokenizer_name: "EleutherAI/gpt-j-6b"
+lora: false
+
+
+max_new_tokens: 512
+temperature: 0.001
+prompt: |
+    #this code prints a string reversed
+    my_string = "hello how are you"
+    print(len(my_string))
+
+
+    My code above does not work. Can you help me?
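This file adds generation settings on top of the model/tokenizer block. Two details are worth noting: prompt uses a YAML literal block scalar (|), so the code snippet and the follow-up question reach the model verbatim, newlines included, and the snippet is broken on purpose (it prints the string's length rather than its reverse), since fixing it is the task posed to the model. Below is a hedged sketch of a consumer of this config, assuming PyYAML and transformers; the repository's actual generate script is not shown in this diff.

    import yaml
    from transformers import AutoModelForCausalLM, AutoTokenizer

    with open("configs/generate/generate_gptj.yaml") as f:
        cfg = yaml.safe_load(f)

    tokenizer = AutoTokenizer.from_pretrained(cfg["tokenizer_name"])
    model = AutoModelForCausalLM.from_pretrained(cfg["model_name"])

    # The multi-line prompt from the block scalar is tokenized as-is.
    inputs = tokenizer(cfg["prompt"], return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=cfg["max_new_tokens"],
        temperature=cfg["temperature"],  # 0.001 behaves as near-greedy sampling
        do_sample=True,
    )
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))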
configs/generate/generate_gptj_lora.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
+# model/tokenizer
+model_name: "EleutherAI/gpt-j-6b"
+tokenizer_name: "EleutherAI/gpt-j-6b"
+lora: true
+lora_path: "nomic-ai/gpt4all-gptj-lora-epoch_0"
+
+max_new_tokens: 512
+temperature: 0
+prompt: |
+    #this code prints a string reversed
+    my_string = "hello how are you"
+    print(len(my_string))
+
+
+    My code above does not work. Can you help me?
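Unlike generate_gptj.yaml above, which names a fully fine-tuned checkpoint with lora: false, this config starts from the base EleutherAI/gpt-j-6b weights and applies the gpt4all LoRA adapter at load time. It also sets temperature to 0, which in transformers is normally expressed as greedy decoding (do_sample=False), since sampling with a zero temperature is undefined. A minimal sketch of the adapter loading, under the same peft assumption as above:

    from transformers import AutoModelForCausalLM
    from peft import PeftModel

    # Base GPT-J weights plus the LoRA adapter named in lora_path.
    base = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6b")
    model = PeftModel.from_pretrained(base, "nomic-ai/gpt4all-gptj-lora-epoch_0")
    # Optionally fold the adapter into the base weights for plain greedy inference.
    model = model.merge_and_unload()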