mirror of https://github.com/ravenscroftj/turbopilot.git
synced 2024-10-01 01:06:01 -04:00
Merge branch 'main' into feature/model-lock
This commit is contained in:
commit dc81abbc52
@@ -6,9 +6,9 @@
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|---------------------|-----------------|-----------------|-----------------|
| StarCoder | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
| StableCode | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
To run in Turbopilot set model type `-m stablecode`
## "Coder" family models
@@ -23,7 +23,7 @@ This model is primarily trained on Python, Java and JavaScript.
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|---------------------|-----------------|-----------------|-----------------|
| StarCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
| SantaCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
To run in Turbopilot set model type `-m starcoder`
@@ -39,7 +39,7 @@ Even when quantized, WizardCoder is a large model that takes up a significant am
|---------------------|-----------------|-----------------|-----------------|
| WizardCoder | ~12GiB | [:arrow_down:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/resolve/main/WizardCoder-15B-1.0.ggmlv3.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/) |
To run in Turbopilot set model type `-m starcoder`
To run in Turbopilot set model type `-m wizardcoder`
### StarCoder (Released 4/5/2023)
@@ -60,6 +60,11 @@ int main(int argc, char **argv)
.default_value(0.1f)
.scan<'g', float>();
program.add_argument("-b", "--batch-size")
.help("set batch size for model completion")
.default_value(512)
.scan<'i',int>();
program.add_argument("prompt").remaining();
@@ -96,6 +101,7 @@ int main(int argc, char **argv)
config.n_threads = program.get<int>("--threads");
config.temp = program.get<float>("--temperature");
config.top_p = program.get<float>("--top-p");
config.n_batch = program.get<int>("--batch-size");
if(model_type.compare("codegen") == 0) {
spdlog::info("Initializing GPT-J type model for '{}' model", model_type);
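This hunk threads the parsed value into the model configuration next to the existing threads/temperature/top-p settings, after which the code dispatches on the `-m` model-type string. A hedged sketch of that flow follows; the struct name, the `stablecode` branch, and the non-codegen log messages are assumptions for illustration, while the field names and the first `spdlog::info` call mirror the hunk above.

```cpp
// Illustrative only: a stand-in config struct and model-type dispatch mirroring
// the pattern in the hunk above. Struct name and extra branches are assumptions.
#include <spdlog/spdlog.h>
#include <string>

struct GenerationConfig {
    int   n_threads = 4;
    float temp      = 0.2f;
    float top_p     = 0.1f;
    int   n_batch   = 512;   // the value added by this change (--batch-size)
};

int main() {
    GenerationConfig config;
    config.n_batch = 512;    // in the real main(): program.get<int>("--batch-size")

    std::string model_type = "codegen";   // in the real main(): taken from the -m option

    if (model_type.compare("codegen") == 0) {
        spdlog::info("Initializing GPT-J type model for '{}' model", model_type);
        // ... construct the codegen backend with `config` ...
    } else if (model_type.compare("stablecode") == 0) {
        spdlog::info("Initializing StableCode type model for '{}' model", model_type);
        // ... construct the stablecode backend with `config` ...
    } else {
        spdlog::error("Unknown model type '{}'", model_type);
        return 1;
    }
    return 0;
}
```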
@@ -136,6 +142,7 @@ int main(int argc, char **argv)
return "Hello world";
});
CROW_ROUTE(app, "/copilot_internal/v2/token")([](){
//return "Hello world";
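The final hunk touches the `/copilot_internal/v2/token` endpoint registered with Crow's `CROW_ROUTE` macro. As a minimal standalone sketch of how such a route is wired up, assuming the Crow framework used above; the include path, port, and response body are placeholders, not TurboPilot's actual behaviour:

```cpp
// Minimal Crow server sketch showing how a route like the one above is
// registered. Include path, port, and response body are assumptions.
#include "crow_all.h"

int main() {
    crow::SimpleApp app;

    // The handler is just a lambda; returning a string sends it as the body.
    CROW_ROUTE(app, "/copilot_internal/v2/token")([](){
        return "{\"token\": \"dummy\"}";   // placeholder payload
    });

    app.port(18080).multithreaded().run();
    return 0;
}
```

With this sketch running, `curl http://localhost:18080/copilot_internal/v2/token` would return the placeholder body.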