'context_pygmalion':"Kawaii's persona: Kawaii is a cheerful person who loves to make others smile. She is an optimist who loves to spread happiness and positivity wherever she goes.\n<START>",
'oasst-*':'<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>',
'alpaca-*':"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a poem about the transformers Python library. \nMention the word \"large language models\" in that poem.\n### Response:\n",
'(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)':"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a poem about the transformers Python library. \nMention the word \"large language models\" in that poem.\n### Response:\n"
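# The keys above appear to be patterns matched against the loaded model's name in
# order to pick a default prompt/context. A minimal lookup sketch follows; the
# function name, the `settings` argument, and the use of re.match are assumptions
# for illustration, not the repository's actual implementation.
import re

def default_prompt_for(model_name, settings):
    # Return the first prompt whose key matches the model name (case-insensitive).
    for pattern, prompt in settings.items():
        if re.match(pattern.lower(), model_name.lower()):
            return prompt
    return ''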
parser.add_argument('--notebook',action='store_true',help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
parser.add_argument('--chat',action='store_true',help='Launch the web UI in chat mode.')
parser.add_argument('--cai-chat',action='store_true',help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
parser.add_argument('--cpu',action='store_true',help='Use the CPU to generate text.')
parser.add_argument('--load-in-8bit',action='store_true',help='Load the model with 8-bit precision.')
parser.add_argument('--gptq-bits',type=int,default=0,help='DEPRECATED: use --wbits instead.')
parser.add_argument('--gptq-model-type',type=str,help='DEPRECATED: use --model_type instead.')
parser.add_argument('--gptq-pre-layer',type=int,default=0,help='DEPRECATED: use --pre_layer instead.')
parser.add_argument('--wbits',type=int,default=0,help='GPTQ: Load a pre-quantized model with specified precision in bits. 2, 3, 4 and 8 are supported.')
parser.add_argument('--model_type',type=str,help='GPTQ: Model type of pre-quantized model. Currently only LLaMA and OPT are supported.')
parser.add_argument('--groupsize',type=int,default=-1,help='GPTQ: Group size.')
parser.add_argument('--pre_layer',type=int,default=0,help='GPTQ: The number of layers to preload.')
parser.add_argument('--bf16',action='store_true',help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
parser.add_argument('--auto-devices',action='store_true',help='Automatically split the model across the available GPU(s) and CPU.')
parser.add_argument('--disk',action='store_true',help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
parser.add_argument('--disk-cache-dir',type=str,default="cache",help='Directory to save the disk cache to. Defaults to "cache".')
parser.add_argument('--gpu-memory',type=str,nargs="+",help='Maximum GPU memory in GiB to be allocated per GPU. Example: --gpu-memory 10 for a single GPU, --gpu-memory 10 5 for two GPUs.')
parser.add_argument('--cpu-memory',type=str,help='Maximum CPU memory in GiB to allocate for offloaded weights. Must be an integer. Defaults to 99.')
parser.add_argument('--no-cache',action='store_true',help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
parser.add_argument("--pin-weight",type=str2bool,nargs="?",const=True,default=True,help="FlexGen: whether to pin weights (setting this to False reduces CPU memory by 20%%).")
parser.add_argument('--rwkv-strategy',type=str,default=None,help='RWKV: The strategy to use while loading the model. Examples: "cpu fp32", "cuda fp16", "cuda fp16i8".')
parser.add_argument('--rwkv-cuda-on',action='store_true',help='RWKV: Compile the CUDA kernel for better performance.')
parser.add_argument('--settings',type=str,help='Load the default interface settings from this json file. See settings-template.json for an example. If you create a file called settings.json, this file will be loaded by default without the need to use the --settings flag.')
parser.add_argument('--extensions',type=str,nargs="+",help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
parser.add_argument("--gradio-auth-path",type=str,help='set gradio authentication file path ex. "/path/to/auth/file" with format like "u1:p1,u2:p2,u3:p3"',default=None)
parser.add_argument("--model-dir",type=str,default='models/',help="Path to directory with all the models")
parser.add_argument("--lora-dir",type=str,default='loras/',help="Path to directory with all the loras")