Mirror of https://github.com/ravenscroftj/turbopilot.git (synced 2024-10-01 01:06:01 -04:00)

Comparing commits 942f9cf533 ... 62dfd821a3 (1 commit: 62dfd821a3)
.github/workflows/build-commit.yml (vendored, 5 lines changed)

@@ -92,7 +92,7 @@ jobs:
      submodules: true

  - name: Install Dependencies
-   run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev libboost-thread-dev
+   run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev

  - name: Install OpenBlas
    if: ${{ matrix.build == 'avx2-openblas' }}

@@ -207,7 +207,6 @@ jobs:
      $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim()))
      $lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe')
      & $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll

  - name: Build
    id: cmake_build
    env:

@@ -215,7 +214,7 @@ jobs:
    run: |
      mkdir build
      cd build
-     cmake .. ${{ matrix.defines }} -DBoost_LIBRARYDIRS=${{ steps.install-boost.outputs.BOOST_ROOT }}/lib
+     cmake .. ${{ matrix.defines }}
      cmake --build . --config Release --target turbopilot

  # - name: Add libopenblas.dll
CMakeLists.txt

@@ -1,15 +1,5 @@
  cmake_minimum_required (VERSION 3.0)
  project(turbopilot VERSION 0.1.0)
- set(CMAKE_CXX_STANDARD 11)
- set(CMAKE_CXX_STANDARD_REQUIRED true)
- set(CMAKE_C_STANDARD 11)
- set(CMAKE_C_STANDARD_REQUIRED true)
- set(THREADS_PREFER_PTHREAD_FLAG ON)
- find_package(Threads REQUIRED)
-
- # option(BUILD_SHARED_LIBS "Build using shared libraries" OFF)
-
  set(CMAKE_EXPORT_COMPILE_COMMANDS "on")
  set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

@@ -37,9 +27,6 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES
  endif()
-
-
-
  add_subdirectory(extern/ggml)
  add_subdirectory(extern/argparse)
  add_subdirectory(extern/spdlog)

@@ -48,13 +35,10 @@ if (GGML_STATIC)
  SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
  SET(BUILD_SHARED_LIBS OFF)
  SET(CMAKE_EXE_LINKER_FLAGS "-static")

- # if(GGML_OPENBLAS)
- #   set(BLA_STATIC ON)
- # endif()
  endif()

  add_subdirectory(src)

  set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
Dockerfile (Ubuntu jammy build image)

@@ -7,7 +7,7 @@ RUN apt-get update && apt-get install ca-certificates gpg wget
  RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
  RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null

- RUN apt-get update && apt-get install -y build-essential cmake libboost-dev libboost-thread-dev
+ RUN apt-get update && apt-get install -y build-essential cmake libboost-dev

  ADD ./ /turbopilot
Dockerfile (Ubuntu focal build image)

@@ -7,7 +7,7 @@ RUN apt-get update && apt-get install ca-certificates gpg wget
  RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
  RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ focal main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null

- RUN apt-get update && apt-get install -y build-essential cmake libboost-dev libboost-thread-dev
+ RUN apt-get update && apt-get install -y build-essential cmake libboost-dev

  ADD ./ /turbopilot
Dockerfile (Alpine build stage)

@@ -1,6 +1,6 @@
  FROM alpine AS build

  RUN apk add --update alpine-sdk boost-dev cmake asio-dev

  ADD ./ /turbopilot/
MODELS.md

@@ -6,9 +6,9 @@
  | Model Name | RAM Requirement | Direct Download | HF Project Link |
  |---------------------|-----------------|-----------------|-----------------|
- | StableCode | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
+ | StarCoder | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |

- To run in Turbopilot set model type `-m stablecode`

  ## "Coder" family models

@@ -23,7 +23,7 @@ This model is primarily trained on Python, Java and Javscript.
  | Model Name | RAM Requirement | Direct Download | HF Project Link |
  |---------------------|-----------------|-----------------|-----------------|
- | SantaCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
+ | StarCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |

  To run in Turbopilot set model type `-m starcoder`

@@ -39,7 +39,7 @@ Even when quantized, WizardCoder is a large model that takes up a significant am
  |---------------------|-----------------|-----------------|-----------------|
  | WizardCoder | ~12GiB | [:arrow_down:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/resolve/main/WizardCoder-15B-1.0.ggmlv3.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/) |

- To run in Turbopilot set model type `-m wizardcoder`
+ To run in Turbopilot set model type `-m starcoder`

  ### StarCoder (Released 4/5/2023)
include/turbopilot/gptj.hpp

@@ -71,7 +71,7 @@ public:
  }
  virtual ~GPTJModel();
  bool load_model(std::string path);
- virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
+ virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);

  private:
  gptj_model *model = NULL;
include/turbopilot/gptneox.hpp

@@ -75,7 +75,7 @@ public:
  }
  virtual ~GPTNEOXModel();
  bool load_model(std::string path);
- virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
+ virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);

  private:
  gpt_neox_model *model = NULL;
include/turbopilot/model.hpp

@@ -7,7 +7,6 @@
  #include <map>
  #include <vector>
  #include <random>
- #include <mutex>

  typedef void (*offload_func_t)(struct ggml_tensor * tensor);
  void ggml_nop(struct ggml_tensor * tensor);

@@ -55,16 +54,11 @@ public:
  rng(rng)
  {}
  virtual bool load_model(std::string model_path) = 0;
- std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+ virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt) = 0;
- void lock();
- void unlock();

  protected:
- virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt) = 0;
  ModelConfig config;
  std::mt19937 &rng;
- std::mutex model_lock;
  };

  #endif //__TURBOPILOT_MODEL_H
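For context: the `-` side of the model.hpp hunk above pairs a pure-virtual predict_impl() with a mutex-guarded public predict() wrapper (the matching definitions appear in the src/common.cpp hunk further down). Below is a minimal, self-contained sketch of that pattern only, with simplified names and a lock_guard in place of the explicit lock()/unlock() helpers; it is not the project's exact code.

    // Sketch: a non-virtual predict() on the base class serializes access to
    // the model, then delegates to the model-specific predict_impl() override
    // (GPT-J, GPT-NeoX, StarCoder, ...).
    #include <mutex>
    #include <sstream>
    #include <string>

    class TurbopilotModelSketch {
    public:
        virtual ~TurbopilotModelSketch() = default;

        // Public entry point: only one inference runs at a time.
        std::stringstream predict(std::string prompt, int max_length, bool include_prompt) {
            std::lock_guard<std::mutex> guard(model_lock);
            return predict_impl(std::move(prompt), max_length, include_prompt);
        }

    protected:
        // Each concrete model implements the actual generation loop here.
        virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt) = 0;

    private:
        std::mutex model_lock;
    };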
include/turbopilot/server.hpp

@@ -2,8 +2,6 @@
  #define __TURBOPILOT_SERVER_H

- #include <spdlog/spdlog.h>

  #include "turbopilot/model.hpp"

  #include "crow_all.h"

@@ -12,46 +10,6 @@ crow::response handle_openai_request(TurbopilotModel *model, const crow::request

  crow::response handle_hf_request(TurbopilotModel *model, const crow::request& req);

- class TBPLogger : public crow::ILogHandler {
- public:
-     TBPLogger() {}
-     void log(std::string message, crow::LogLevel crow_level) {
-         // "message" doesn't contain the timestamp and loglevel
-         // prefix the default logger does and it doesn't end
-         // in a newline.
-
-         spdlog::level::level_enum level = spdlog::level::info;
-
-         switch(crow_level){
-             case crow::LogLevel::Critical:
-                 level = spdlog::level::critical;
-                 break;
-
-             case crow::LogLevel::Error:
-                 level = spdlog::level::err;
-                 break;
-
-             case crow::LogLevel::Warning:
-                 level = spdlog::level::warn;
-                 break;
-
-             case crow::LogLevel::Info:
-                 level = spdlog::level::info;
-                 break;
-
-             case crow::LogLevel::Debug:
-                 level = spdlog::level::debug;
-                 break;
-
-             default:
-                 // if case is not a known value, assume the worst
-                 level = spdlog::level::critical;
-         }
-
-         spdlog::log(level, message);
-     }
- };

  #endif // __TURBOPILOT_SERVER_H
include/turbopilot/starcoder.hpp

@@ -68,7 +68,7 @@ public:
  }
  virtual ~StarcoderModel();
  bool load_model(std::string path);
- virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
+ virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);

  private:
  starcoder_model *model = NULL;
src/CMakeLists.txt

@@ -1,7 +1,6 @@
  set(TURBOPILOT_TARGET turbopilot)

- find_package(Boost COMPONENTS thread system REQUIRED)
+ find_package(Boost REQUIRED)

  include_directories(${Boost_INCLUDE_DIRS})

  add_executable(${TURBOPILOT_TARGET}

@@ -17,9 +16,6 @@ add_executable(${TURBOPILOT_TARGET}
  ../include/turbopilot/starcoder.hpp
  )

- #set(THREADS_PREFER_PTHREAD_FLAG TRUE)
- #find_package(Threads REQUIRED)

  target_include_directories(${TURBOPILOT_TARGET} PRIVATE
  ../include

@@ -27,6 +23,8 @@ target_include_directories(${TURBOPILOT_TARGET} PRIVATE
  ../extern/crow/include
  )

- #target_compile_features(${TURBOPILOT_TARGET} PRIVATE cxx_std_11)

  target_link_libraries(${TURBOPILOT_TARGET} PRIVATE ggml argparse)

+ #target_link_libraries(${TURBOPILOT_TARGET} PRIVATE spdlog::spdlog_header_only)
src/common.cpp

@@ -4,22 +4,6 @@
  #include <cmath>
  #include <random>

- void TurbopilotModel::lock(){
-     this->model_lock.lock();
- }
-
- void TurbopilotModel::unlock(){
-     this->model_lock.unlock();
- }
-
- std::stringstream TurbopilotModel::predict(std::string prompt, int max_length, bool include_prompt){
-     lock();
-     auto result = predict_impl(prompt, max_length, include_prompt);
-     unlock();
-     return result;
- }

  void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
      (void) tensor;
  }

@@ -179,6 +163,4 @@ gpt_vocab::id gpt_sample_top_k_top_p(
  int idx = dist(rng);

  return logits_id[idx].second;
  }
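The hunk above carries the definitions for the lock()/unlock()/predict() wrapper that appears on the `-` side of the model.hpp header. The reason such a wrapper exists at all is that Crow serves HTTP requests on several threads, so two completion requests can reach the same model at once; the mutex makes the second caller wait instead of racing on shared model state. A small stand-alone illustration of that behaviour follows; the class and member names are stand-ins, not taken from the repository.

    // Two threads call predict() on the same object; the mutex serializes them.
    #include <chrono>
    #include <iostream>
    #include <mutex>
    #include <string>
    #include <thread>

    class DemoModel {
    public:
        std::string predict(const std::string &prompt) {
            std::lock_guard<std::mutex> guard(model_lock);                // one inference at a time
            std::this_thread::sleep_for(std::chrono::milliseconds(100));  // pretend to run the model
            return "completion for: " + prompt;
        }
    private:
        std::mutex model_lock;
    };

    int main() {
        DemoModel model;
        std::thread a([&] { std::cout << model.predict("foo") + "\n"; });
        std::thread b([&] { std::cout << model.predict("bar") + "\n"; });
        a.join();
        b.join();
        return 0;
    }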
src/gptj.cpp

@@ -556,7 +556,7 @@ bool GPTJModel::load_model(std::string fname) {
  return true;
  }

- std::stringstream GPTJModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
+ std::stringstream GPTJModel::predict(std::string prompt, int max_length, bool include_prompt) {

  std::stringstream result;
  // tokenize the prompt
src/gptneox.cpp

@@ -91,7 +91,6 @@ bool gpt_neox_eval(
  const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
  //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);

  // reallocate
  buf_size = buf_size_new;
  buf = realloc(buf, buf_size);

@@ -99,8 +98,6 @@ bool gpt_neox_eval(
  fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
  return false;
  }

- spdlog::debug("{}: reallocating context buffer {} -> now {} bytes of tokens in prompt = {}", __func__, buf_size, buf_size_new);
  }

  struct ggml_init_params params = {

@@ -286,7 +283,6 @@ bool gpt_neox_eval(
  // ggml_graph_print (&gf);
  // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
  //}

  //embd_w.resize(n_vocab*N);
  //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);

@@ -297,9 +293,7 @@ bool gpt_neox_eval(
  if (mem_per_token == 0) {
  mem_per_token = ggml_used_mem(ctx0)/N;
  }
- spdlog::debug("used_mem = {}\n", ggml_used_mem(ctx0));
  //printf("used_mem = %zu\n", ggml_used_mem(ctx0));

  ggml_free(ctx0);

@@ -618,7 +612,7 @@ bool GPTNEOXModel::load_model(std::string fname) {
  return true;
  }

- std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
+ std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool include_prompt) {

  std::stringstream result;
  // tokenize the prompt

@@ -637,8 +631,6 @@ std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length,

  std::vector<gpt_vocab::id> embd;

  // determine the required inference memory per token:
  size_t mem_per_token = 0;

@@ -725,4 +717,3 @@ std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length,

  return result;
  }
src/main.cpp (14 lines changed)

@@ -60,11 +60,6 @@ int main(int argc, char **argv)
  .default_value(0.1f)
  .scan<'g', float>();

- program.add_argument("-b", "--batch-size")
-     .help("set batch size for model completion")
-     .default_value(512)
-     .scan<'i',int>();

  program.add_argument("prompt").remaining();
@@ -101,7 +96,6 @@ int main(int argc, char **argv)
  config.n_threads = program.get<int>("--threads");
  config.temp = program.get<float>("--temperature");
  config.top_p = program.get<float>("--top-p");
- config.n_batch = program.get<int>("--batch-size");

  if(model_type.compare("codegen") == 0) {
  spdlog::info("Initializing GPT-J type model for '{}' model", model_type);
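The two main.cpp hunks above cover both halves of the `-b/--batch-size` option on the `-` side of the diff: the argument definition and the program.get<int>() read that feeds config.n_batch. For reference, here is a minimal stand-alone sketch of how such a flag is declared and read back with the vendored argparse library; the program name and the printout are illustrative only, not the project's code.

    // Declare a typed CLI flag, parse argv, then read the value back out.
    #include <argparse/argparse.hpp>
    #include <cstdlib>
    #include <iostream>

    int main(int argc, char **argv) {
        argparse::ArgumentParser program("turbopilot-sketch");

        program.add_argument("-b", "--batch-size")
            .help("set batch size for model completion")
            .default_value(512)
            .scan<'i', int>();          // parse the value as a base-10 int

        try {
            program.parse_args(argc, argv);
        } catch (const std::exception &err) {
            std::cerr << err.what() << "\n";
            return EXIT_FAILURE;
        }

        int n_batch = program.get<int>("--batch-size");
        std::cout << "batch size: " << n_batch << "\n";
        return EXIT_SUCCESS;
    }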
src/main.cpp (continued)

@@ -127,22 +121,16 @@ int main(int argc, char **argv)
  }

  t_load_us = ggml_time_us() - t_start_us;

  spdlog::info("Loaded model in {:0.2f}ms", t_load_us/1000.0f);

  crow::SimpleApp app;

- TBPLogger logger;
- crow::logger::setHandler(&logger);

  CROW_ROUTE(app, "/")([](){
  return "Hello world";
  });

  CROW_ROUTE(app, "/copilot_internal/v2/token")([](){
  //return "Hello world";

@@ -180,7 +168,5 @@ int main(int argc, char **argv)
  app.port(program.get<int>("--port")).multithreaded().run();

  free(model);
  }
src/server.cpp

@@ -37,6 +37,7 @@ crow::response handle_hf_request(TurbopilotModel *model, const crow::request& re
  crow::json::wvalue response = {
  {"generated_text", result.str()},
  };

  crow::response res;
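The server.cpp hunk above builds a crow::json::wvalue holding the generated text and then declares a crow::response. As a rough, assumed sketch of how such a handler typically finishes (not copied from the repository), the JSON document is serialized into the response body along with a content-type header:

    // Assumed continuation of a handler like handle_hf_request(): turn the
    // wvalue payload into an HTTP 200 response with a JSON content type.
    #include "crow_all.h"

    crow::response make_json_response(crow::json::wvalue &payload) {
        crow::response res;
        res.code = 200;
        res.set_header("Content-Type", "application/json");
        res.body = payload.dump();   // serialize the JSON document to a string
        return res;
    }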
src/starcoder.cpp

@@ -44,13 +44,13 @@ bool starcoder_eval(
  if (mem_per_token > 0 && mem_per_token*N > buf_size) {
  const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
- spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);
+ //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);

  // reallocate
  buf_size = buf_size_new;
  buf = realloc(buf, buf_size);
  if (buf == nullptr) {
- spdlog::error("{}: failed to allocate {} bytes\n", __func__, buf_size);
+ fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
  return false;
  }
  }

@@ -681,7 +681,7 @@ bool StarcoderModel::load_model(std::string fname) {
  }

- std::stringstream StarcoderModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
+ std::stringstream StarcoderModel::predict(std::string prompt, int max_length, bool include_prompt) {

  std::stringstream result;
  // tokenize the prompt
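The first starcoder.cpp hunk above toggles the buffer-reallocation messages between two logging styles: spdlog's fmt-style "{}" placeholders on one side and printf-style format specifiers on the other. A small illustrative sketch of the two styles side by side (values and function name here are made up):

    // The same message emitted through spdlog ({} placeholders, type-safe
    // formatting) and through the C stdio fallback (%zu for size_t).
    #include <cstddef>
    #include <cstdio>
    #include <spdlog/spdlog.h>

    void report_realloc(std::size_t old_size, std::size_t new_size) {
        spdlog::debug("reallocating buffer from {} to {} bytes", old_size, new_size);
        std::fprintf(stderr, "reallocating buffer from %zu to %zu bytes\n", old_size, new_size);
    }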