Compare commits

...

20 Commits

Author SHA1 Message Date
m3ndax
942f9cf533
Merge eacb0d59c2 into c4e57e0aab 2023-08-26 13:30:03 +07:00
James Ravenscroft
c4e57e0aab
Merge pull request #58 from ravenscroftj/feature/model-lock
WIP: implement locking of model per request
2023-08-25 06:57:05 +01:00
James Ravenscroft
dc81abbc52 Merge branch 'main' into feature/model-lock 2023-08-24 14:58:44 +01:00
James Ravenscroft
ae2d505a2f use std mutex instead of boost mutex 2023-08-24 14:55:23 +01:00
James Ravenscroft
143155dac3 boost 2023-08-24 14:28:01 +01:00
James Ravenscroft
596c835939 boost 2023-08-24 14:26:40 +01:00
James Ravenscroft
6b0a25cb71
Merge pull request #59 from ravenscroftj/feature/batch-flag
expose batch size flag to cli
2023-08-24 14:13:40 +01:00
James Ravenscroft
227501188c try to set boost librarydir 2023-08-24 13:57:22 +01:00
James Ravenscroft
f69a8f65d4 fix build? 2023-08-24 13:49:14 +01:00
James Ravenscroft
22f2993db4 try using stage lib dir for boost root 2023-08-24 13:28:58 +01:00
James Ravenscroft
e8beac34e7 more attempts to build with boost threads 2023-08-24 13:17:51 +01:00
James Ravenscroft
ccf425f019 update deps for boost 2023-08-24 13:05:10 +01:00
James Ravenscroft
cceee41f79 add boost threads 2023-08-24 13:03:12 +01:00
James Ravenscroft
113544400a try adding build boost dirs explicitly 2023-08-24 12:04:45 +01:00
James Ravenscroft
f0627cd567 add boost libraries to cmake 2023-08-24 11:50:21 +01:00
James Ravenscroft
2d617b458e expose batch size flag to cli 2023-08-24 11:40:19 +01:00
James Ravenscroft
0c1fc1a04e implement locking of model per request to prevent crashing when multiple requests are received 2023-08-24 11:30:44 +01:00
James Ravenscroft
e0adf0519b
Merge pull request #57 from aperullo/model-docs-fix
Fix incorrect instructions in model docs
2023-08-24 10:18:36 +01:00
aperullo
ef1402a1a8 Fix errant command in model docs 2023-08-23 17:59:43 -04:00
m3ndax
eacb0d59c2
Create codacy.yml 2023-06-26 22:18:05 +02:00
19 changed files with 194 additions and 26 deletions

View File: GitHub Actions build workflow

@@ -92,7 +92,7 @@ jobs:
        submodules: true
    - name: Install Dependencies
-     run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev
+     run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev libboost-thread-dev
    - name: Install OpenBlas
      if: ${{ matrix.build == 'avx2-openblas' }}
@@ -207,6 +207,7 @@ jobs:
$msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim()))
$lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe')
& $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll
- name: Build
id: cmake_build
env:
@@ -214,7 +215,7 @@ jobs:
      run: |
        mkdir build
        cd build
-       cmake .. ${{ matrix.defines }}
+       cmake .. ${{ matrix.defines }} -DBoost_LIBRARYDIRS=${{ steps.install-boost.outputs.BOOST_ROOT }}/lib
        cmake --build . --config Release --target turbopilot
# - name: Add libopenblas.dll

View File: .github/workflows/codacy.yml (new file, 61 lines)

@@ -0,0 +1,61 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow checks out code, performs a Codacy security scan
# and integrates the results with the
# GitHub Advanced Security code scanning feature. For more information on
# the Codacy security scan action usage and parameters, see
# https://github.com/codacy/codacy-analysis-cli-action.
# For more information on Codacy Analysis CLI in general, see
# https://github.com/codacy/codacy-analysis-cli.
name: Codacy Security Scan
on:
  push:
    branches: [ "main" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "main" ]
  schedule:
    - cron: '36 12 * * 3'

permissions:
  contents: read

jobs:
  codacy-security-scan:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Codacy Security Scan
    runs-on: ubuntu-latest
    steps:
      # Checkout the repository to the GitHub Actions runner
      - name: Checkout code
        uses: actions/checkout@v3

      # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis
      - name: Run Codacy Analysis CLI
        uses: codacy/codacy-analysis-cli-action@d840f886c4bd4edc059706d09c6a1586111c540b
        with:
          # Check https://github.com/codacy/codacy-analysis-cli#project-token to get your project token from your Codacy repository
          # You can also omit the token and run the tools that support default configurations
          project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
          verbose: true
          output: results.sarif
          format: sarif
          # Adjust severity of non-security issues
          gh-code-scanning-compat: true
          # Force 0 exit code to allow SARIF file generation
          # This hands control of PR rejection over to the GitHub side
          max-allowed-issues: 2147483647

      # Upload the SARIF file generated in the previous step
      - name: Upload SARIF results file
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: results.sarif

View File: CMakeLists.txt

@@ -1,5 +1,15 @@
cmake_minimum_required (VERSION 3.0)
project(turbopilot VERSION 0.1.0)
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED true)
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED true)
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
+# option(BUILD_SHARED_LIBS "Build using shared libraries" OFF)
set(CMAKE_EXPORT_COMPILE_COMMANDS "on")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
@@ -27,6 +37,9 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES
endif()
add_subdirectory(extern/ggml)
add_subdirectory(extern/argparse)
add_subdirectory(extern/spdlog)
@@ -35,8 +48,11 @@ if (GGML_STATIC)
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
SET(BUILD_SHARED_LIBS OFF)
SET(CMAKE_EXE_LINKER_FLAGS "-static")
endif()
+# if(GGML_OPENBLAS)
+# set(BLA_STATIC ON)
+# endif()
endif()
add_subdirectory(src)

View File: Dockerfile (Ubuntu jammy base)

@@ -7,7 +7,7 @@ RUN apt-get update && apt-get install ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null
-RUN apt-get update && apt-get install -y build-essential cmake libboost-dev
+RUN apt-get update && apt-get install -y build-essential cmake libboost-dev libboost-thread-dev
ADD ./ /turbopilot

View File: Dockerfile (Ubuntu focal base)

@@ -7,7 +7,7 @@ RUN apt-get update && apt-get install ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ focal main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null
-RUN apt-get update && apt-get install -y build-essential cmake libboost-dev
+RUN apt-get update && apt-get install -y build-essential cmake libboost-dev libboost-thread-dev
ADD ./ /turbopilot

View File: Dockerfile (Alpine base)

@@ -1,6 +1,6 @@
FROM alpine AS build
RUN apk add --update alpine-sdk boost-dev cmake asio-dev
ADD ./ /turbopilot/

View File: MODELS.md

@@ -6,9 +6,9 @@
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|---------------------|-----------------|-----------------|-----------------|
-| StarCoder | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
+| StableCode | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
To run in Turbopilot set model type `-m stablecode`
## "Coder" family models
@@ -23,7 +23,7 @@ This model is primarily trained on Python, Java and JavaScript.
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|---------------------|-----------------|-----------------|-----------------|
-| StarCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
+| SantaCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
To run in Turbopilot set model type `-m starcoder`
@@ -39,7 +39,7 @@ Even when quantized, WizardCoder is a large model that takes up a significant am
|---------------------|-----------------|-----------------|-----------------|
| WizardCoder | ~12GiB | [:arrow_down:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/resolve/main/WizardCoder-15B-1.0.ggmlv3.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/) |
-To run in Turbopilot set model type `-m starcoder`
+To run in Turbopilot set model type `-m wizardcoder`
### StarCoder (Released 4/5/2023)

View File: include/turbopilot/gptj.hpp

@@ -71,7 +71,7 @@ public:
}
virtual ~GPTJModel();
bool load_model(std::string path);
-virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
private:
gptj_model *model = NULL;

View File: include/turbopilot/gptneox.hpp

@@ -75,7 +75,7 @@ public:
}
virtual ~GPTNEOXModel();
bool load_model(std::string path);
-virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
private:
gpt_neox_model *model = NULL;

View File: include/turbopilot/model.hpp

@@ -7,6 +7,7 @@
#include <map>
#include <vector>
#include <random>
+#include <mutex>
typedef void (*offload_func_t)(struct ggml_tensor * tensor);
void ggml_nop(struct ggml_tensor * tensor);
@@ -54,11 +55,16 @@ public:
rng(rng)
{}
virtual bool load_model(std::string model_path) = 0;
-virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt) = 0;
+std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+void lock();
+void unlock();
protected:
+virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt) = 0;
ModelConfig config;
std::mt19937 &rng;
+std::mutex model_lock;
};
#endif //__TURBOPILOT_MODEL_H
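Taken together, the changes in this header apply the non-virtual interface pattern: predict() is now a concrete method that acquires model_lock and then delegates to the subclass-specific predict_impl(). A minimal caller-side sketch (hypothetical code, purely to illustrate that concurrent requests now serialize on the shared model):

#include <thread>
#include "turbopilot/model.hpp"

// Two request threads share one model. Both calls enter TurbopilotModel::predict(),
// which serializes them on model_lock instead of letting them race inside ggml.
void demo(TurbopilotModel *model) {
    std::thread a([model] { auto r = model->predict("def add(a, b):", 64, false); });
    std::thread b([model] { auto r = model->predict("int main() {", 64, false); });
    a.join();
    b.join(); // the two predictions ran one at a time
}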

View File: include/turbopilot/server.hpp

@@ -2,6 +2,8 @@
#define __TURBOPILOT_SERVER_H
+#include <spdlog/spdlog.h>
#include "turbopilot/model.hpp"
#include "crow_all.h"
@@ -10,6 +12,46 @@ crow::response handle_openai_request(TurbopilotModel *model, const crow::request
crow::response handle_hf_request(TurbopilotModel *model, const crow::request& req);
+class TBPLogger : public crow::ILogHandler {
+public:
+    TBPLogger() {}
+    void log(std::string message, crow::LogLevel crow_level) {
+        // "message" lacks the timestamp and log-level prefix that the
+        // default logger adds, and it does not end in a newline.
+        spdlog::level::level_enum level = spdlog::level::info;
+        switch (crow_level) {
+            case crow::LogLevel::Critical:
+                level = spdlog::level::critical;
+                break;
+            case crow::LogLevel::Error:
+                level = spdlog::level::err;
+                break;
+            case crow::LogLevel::Warning:
+                level = spdlog::level::warn;
+                break;
+            case crow::LogLevel::Info:
+                level = spdlog::level::info;
+                break;
+            case crow::LogLevel::Debug:
+                level = spdlog::level::debug;
+                break;
+            default:
+                // unknown level: assume the worst
+                level = spdlog::level::critical;
+        }
+        spdlog::log(level, message);
+    }
+};
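For the handler to take effect it must be registered once at startup; the main.cpp hunk further down does exactly this:

TBPLogger logger;
crow::logger::setHandler(&logger); // all Crow log output now flows through spdlog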
#endif // __TURBOPILOT_SERVER_H

View File: include/turbopilot/starcoder.hpp

@@ -68,7 +68,7 @@ public:
}
virtual ~StarcoderModel();
bool load_model(std::string path);
-virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
private:
starcoder_model *model = NULL;

View File: src/CMakeLists.txt

@@ -1,6 +1,7 @@
set(TURBOPILOT_TARGET turbopilot)
-find_package(Boost REQUIRED)
+find_package(Boost COMPONENTS thread system REQUIRED)
+include_directories(${Boost_INCLUDE_DIRS})
add_executable(${TURBOPILOT_TARGET}
@@ -16,6 +17,9 @@ add_executable(${TURBOPILOT_TARGET}
../include/turbopilot/starcoder.hpp
)
+#set(THREADS_PREFER_PTHREAD_FLAG TRUE)
+#find_package(Threads REQUIRED)
target_include_directories(${TURBOPILOT_TARGET} PRIVATE
../include
@@ -23,8 +27,6 @@ target_include_directories(${TURBOPILOT_TARGET} PRIVATE
../extern/crow/include
)
#target_compile_features(${TURBOPILOT_TARGET} PRIVATE cxx_std_11)
target_link_libraries(${TURBOPILOT_TARGET} PRIVATE ggml argparse)
#target_link_libraries(${TURBOPILOT_TARGET} PRIVATE spdlog::spdlog_header_only)
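Note that since the merged code ended up using std::mutex rather than Boost threads (see the "use std mutex instead of boost mutex" commit above), the root CMakeLists' find_package(Threads REQUIRED) with THREADS_PREFER_PTHREAD_FLAG is the threading setup that actually matters; linking the target against Threads::Threads (not shown in this hunk) would be the usual way to pick up -pthread on POSIX toolchains.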

View File: src/common.cpp

@@ -4,6 +4,22 @@
#include <cmath>
#include <random>
+void TurbopilotModel::lock(){
+    this->model_lock.lock();
+}
+void TurbopilotModel::unlock(){
+    this->model_lock.unlock();
+}
+std::stringstream TurbopilotModel::predict(std::string prompt, int max_length, bool include_prompt){
+    lock();
+    auto result = predict_impl(prompt, max_length, include_prompt);
+    unlock();
+    return result;
+}
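One caveat, noted here as an observation rather than part of the diff: predict() unlocks manually, so if predict_impl() ever throws, unlock() is never reached and the mutex stays held for all later requests. A sketch of an exception-safe alternative using std::lock_guard:

std::stringstream TurbopilotModel::predict(std::string prompt, int max_length, bool include_prompt){
    // the guard releases model_lock on every exit path, including exceptions
    std::lock_guard<std::mutex> guard(model_lock);
    return predict_impl(prompt, max_length, include_prompt);
}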
void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
(void) tensor;
}
@@ -163,4 +179,6 @@ gpt_vocab::id gpt_sample_top_k_top_p(
int idx = dist(rng);
return logits_id[idx].second;
}
}

View File: src/gptj.cpp

@@ -556,7 +556,7 @@ bool GPTJModel::load_model(std::string fname) {
return true;
}
-std::stringstream GPTJModel::predict(std::string prompt, int max_length, bool include_prompt) {
+std::stringstream GPTJModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
std::stringstream result;
// tokenize the prompt

View File: src/gptneox.cpp

@@ -91,6 +91,7 @@ bool gpt_neox_eval(
const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
//printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
// reallocate
buf_size = buf_size_new;
buf = realloc(buf, buf_size);
@@ -98,6 +99,8 @@
fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
return false;
}
+spdlog::debug("{}: reallocating context buffer -> now {} bytes, tokens in prompt = {}", __func__, buf_size_new, N);
}
struct ggml_init_params params = {
@@ -283,6 +286,7 @@ bool gpt_neox_eval(
// ggml_graph_print (&gf);
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
//}
//embd_w.resize(n_vocab*N);
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
@@ -293,7 +297,9 @@
if (mem_per_token == 0) {
mem_per_token = ggml_used_mem(ctx0)/N;
}
+spdlog::debug("used_mem = {}\n", ggml_used_mem(ctx0));
//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
ggml_free(ctx0);
@@ -612,7 +618,7 @@ bool GPTNEOXModel::load_model(std::string fname) {
return true;
}
-std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool include_prompt) {
+std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
std::stringstream result;
// tokenize the prompt
@@ -631,6 +637,8 @@ std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool
std::vector<gpt_vocab::id> embd;
// determine the required inference memory per token:
size_t mem_per_token = 0;
@@ -717,3 +725,4 @@ std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool
return result;
}

View File: src/main.cpp

@@ -60,6 +60,11 @@ int main(int argc, char **argv)
.default_value(0.1f)
.scan<'g', float>();
+program.add_argument("-b", "--batch-size")
+    .help("set batch size for model completion")
+    .default_value(512)
+    .scan<'i', int>();
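Together with the config wiring below, this makes the batch size tunable per run, e.g. ./turbopilot -m starcoder -f ./model.bin -b 256 (a hypothetical invocation: the -m and -f flags are assumed from the existing CLI; only -b/--batch-size is defined in this hunk).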
program.add_argument("prompt").remaining();
@@ -96,6 +101,7 @@ int main(int argc, char **argv)
config.n_threads = program.get<int>("--threads");
config.temp = program.get<float>("--temperature");
config.top_p = program.get<float>("--top-p");
+config.n_batch = program.get<int>("--batch-size");
if(model_type.compare("codegen") == 0) {
spdlog::info("Initializing GPT-J type model for '{}' model", model_type);
@@ -121,16 +127,22 @@
}
t_load_us = ggml_time_us() - t_start_us;
spdlog::info("Loaded model in {:0.2f}ms", t_load_us/1000.0f);
crow::SimpleApp app;
+TBPLogger logger;
+crow::logger::setHandler(&logger);
CROW_ROUTE(app, "/")([](){
return "Hello world";
});
CROW_ROUTE(app, "/copilot_internal/v2/token")([](){
//return "Hello world";
@@ -168,5 +180,7 @@ int main(int argc, char **argv)
app.port(program.get<int>("--port")).multithreaded().run();
free(model);
}
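A side note on the shutdown line above, offered as an observation rather than something this diff touches: if model is allocated with new, as the model-initialization branches suggest, free(model) releases the memory without invoking the destructor. The matching cleanup would be:

delete model; // runs the virtual ~TurbopilotModel() chain, then frees the memory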

View File: src/server.cpp

@@ -37,7 +37,6 @@ crow::response handle_hf_request(TurbopilotModel *model, const crow::request& re
crow::json::wvalue response = {
{"generated_text", result.str()},
};
crow::response res;

View File: src/starcoder.cpp

@@ -44,13 +44,13 @@ bool starcoder_eval(
if (mem_per_token > 0 && mem_per_token*N > buf_size) {
const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
-//printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
+spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);
// reallocate
buf_size = buf_size_new;
buf = realloc(buf, buf_size);
if (buf == nullptr) {
-fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
+spdlog::error("{}: failed to allocate {} bytes\n", __func__, buf_size);
return false;
}
}
@@ -681,7 +681,7 @@ bool StarcoderModel::load_model(std::string fname) {
}
-std::stringstream StarcoderModel::predict(std::string prompt, int max_length, bool include_prompt) {
+std::stringstream StarcoderModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
std::stringstream result;
// tokenize the prompt