Compare commits

...

64 Commits

Author SHA1 Message Date
James Ravenscroft
a00de2a332 recomment the cuda preprocessor check 2023-08-26 16:21:42 +01:00
James Ravenscroft
215a69b5af update clblast code in gpt-j model 2023-08-26 16:16:01 +01:00
James Ravenscroft
91639b8fc0 disable clblast docker images 2023-08-26 16:12:50 +01:00
James Ravenscroft
0b408510f4 add gpu offload for gpt-j models (codegen) 2023-08-26 16:11:16 +01:00
James Ravenscroft
604183380d tidy up prints in stablecoder and starcoder 2023-08-26 16:04:41 +01:00
James Ravenscroft
88683abe50 update run script to incorporate GPU layers 2023-08-26 16:03:16 +01:00
James Ravenscroft
326e76c9bb Merge branch 'main' into feature/gpu_layers 2023-08-26 15:59:13 +01:00
James Ravenscroft
23c0a3d19e Merge branch 'feature/gpu_layers' of github.com:ravenscroftj/turbopilot into feature/gpu_layers 2023-08-26 15:34:28 +01:00
James Ravenscroft
31bb33c731 use latest upstream ggml instead of mine 2023-08-26 15:34:15 +01:00
James Ravenscroft
d4989b543c Merge pull request #62 from ravenscroftj/feature/blasdocker (Implement better docker builds) 2023-08-26 15:32:13 +01:00
James Ravenscroft
e9dc6a304a use latest upstream ggml instead of mine 2023-08-26 15:22:14 +01:00
James Ravenscroft
97a0377cd6 remove llama.cpp submodule 2023-08-26 15:21:01 +01:00
James Ravenscroft
6d26c9b064 Merge branch 'feature/gpu_layers' of github.com:ravenscroftj/turbopilot into feature/gpu_layers 2023-08-26 15:20:25 +01:00
James Ravenscroft
63b554793d tidy cmakelist 2023-08-26 15:20:02 +01:00
James Ravenscroft
0cf7a9c341 remove llama 2023-08-26 15:19:51 +01:00
James Ravenscroft
356a83c5fd remove crow submodule 2023-08-26 15:19:17 +01:00
James Ravenscroft
a5517b0fcd use ggerganov ggml instead of mine 2023-08-26 15:19:04 +01:00
James Ravenscroft
8fa70e1518 update for gpu build 2023-08-26 15:14:18 +01:00
James Ravenscroft
b79ab46b50 add gpu offload for gptneox 2023-08-26 15:14:02 +01:00
James Ravenscroft
4a47251822 update for gpu build 2023-08-26 15:13:08 +01:00
James Ravenscroft
b2b4a1480f increase scratch on starcoder 2023-08-26 15:12:41 +01:00
James Ravenscroft
5f7155a314 add gpu offload for gptneox 2023-08-26 15:12:41 +01:00
James Ravenscroft
77cde95cb9 remove deprecated cuda dockerfiles 2023-08-26 14:17:31 +01:00
James Ravenscroft
bea7ebdb34 correct runtime libs for openblas and clblast 2023-08-26 14:16:39 +01:00
James Ravenscroft
812bbea9d7 correct typo with clblast 2023-08-26 13:53:26 +01:00
James Ravenscroft
08e8834390 add changes to dockerfile 2023-08-26 13:51:50 +01:00
James Ravenscroft
25680e64d8 remove all the quotes 2023-08-26 13:43:33 +01:00
James Ravenscroft
dca25d8456 remove quotes 2023-08-26 13:42:11 +01:00
James Ravenscroft
1f6f84a783 add quotes to args 2023-08-26 13:39:02 +01:00
James Ravenscroft
0183b30502 always use ubuntu 22.04 2023-08-26 13:32:01 +01:00
James Ravenscroft
b465eae818 break out vars 2023-08-26 13:28:08 +01:00
James Ravenscroft
e8adff5339 try again 2023-08-26 13:22:09 +01:00
James Ravenscroft
f12dacaa15 read the readme properly 2023-08-26 13:16:34 +01:00
James Ravenscroft
0b0b914f92 add commas? 2023-08-26 13:12:27 +01:00
James Ravenscroft
b21dd0799d fix basenames 2023-08-26 13:10:23 +01:00
James Ravenscroft
c73c196364 build nvidia with default dockerfile 2023-08-26 13:08:19 +01:00
James Ravenscroft
30834e3121 remove brew update to prevent python breaking build 2023-08-26 12:58:02 +01:00
James Ravenscroft
39c3182a3a try to fix build args 2023-08-26 12:57:00 +01:00
James Ravenscroft
2abdcabf02 use lists for build args 2023-08-26 12:45:23 +01:00
James Ravenscroft
6877542ad8 blas docker build 2023-08-26 12:43:52 +01:00
James Ravenscroft
5b561f7b7e Merge pull request #61 from c01o/patch-1 (Fix download link on MODELS.md) 2023-08-26 12:37:21 +01:00
c01o
e85492d8ba Fix download link on MODELS.md 2023-08-26 19:16:15 +09:00
James Ravenscroft
f840ea0b73 Merge pull request #60 from nvtienanh/update-dockerfile-default (Change from alpine to ubuntu in Dockerfile.default) 2023-08-26 09:43:24 +01:00
Anh Nguyen
3e37c4bb7c Update cmake command 2023-08-26 14:27:27 +07:00
Anh Nguyen
20b1460bd8 Using GGML_STATIC 2023-08-26 14:21:18 +07:00
Anh Nguyen
2308b9ae21 Change from alpine to ubuntu in dockerfile.default 2023-08-26 13:21:04 +07:00
James Ravenscroft
c4e57e0aab Merge pull request #58 from ravenscroftj/feature/model-lock (WIP: implement locking of model per request) 2023-08-25 06:57:05 +01:00
James Ravenscroft
dc81abbc52 Merge branch 'main' into feature/model-lock 2023-08-24 14:58:44 +01:00
James Ravenscroft
ae2d505a2f use std mutex instead of boost mutex 2023-08-24 14:55:23 +01:00
James Ravenscroft
143155dac3 boost 2023-08-24 14:28:01 +01:00
James Ravenscroft
596c835939 boost 2023-08-24 14:26:40 +01:00
James Ravenscroft
6b0a25cb71 Merge pull request #59 from ravenscroftj/feature/batch-flag (expose batch size flag to cli) 2023-08-24 14:13:40 +01:00
James Ravenscroft
227501188c try to set boost librarydir 2023-08-24 13:57:22 +01:00
James Ravenscroft
f69a8f65d4 fix build? 2023-08-24 13:49:14 +01:00
James Ravenscroft
22f2993db4 try using stage lib dir for boost root 2023-08-24 13:28:58 +01:00
James Ravenscroft
e8beac34e7 more attempts to build with boost threads 2023-08-24 13:17:51 +01:00
James Ravenscroft
ccf425f019 update deps for boost 2023-08-24 13:05:10 +01:00
James Ravenscroft
cceee41f79 add boost threads 2023-08-24 13:03:12 +01:00
James Ravenscroft
113544400a try adding build boost dirs explicitely 2023-08-24 12:04:45 +01:00
James Ravenscroft
f0627cd567 add boost libraries to cmake 2023-08-24 11:50:21 +01:00
James Ravenscroft
2d617b458e expose batch size flag to cli 2023-08-24 11:40:19 +01:00
James Ravenscroft
0c1fc1a04e implement locking of model per request to prevent crashing when multiple requests received 2023-08-24 11:30:44 +01:00
James Ravenscroft
e0adf0519b Merge pull request #57 from aperullo/model-docs-fix (Fix incorrect instructions in model docs) 2023-08-24 10:18:36 +01:00
aperullo
ef1402a1a8 Fix errant command in model docs 2023-08-23 17:59:43 -04:00
30 changed files with 612 additions and 118 deletions


@ -25,7 +25,6 @@ jobs:
- name: Dependencies
id: depends
run: |
brew update
brew install cmake boost asio
- name: Build
id: make_build
@ -92,7 +91,7 @@ jobs:
submodules: true
- name: Install Dependencies
run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev
run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev libboost-thread-dev
- name: Install OpenBlas
if: ${{ matrix.build == 'avx2-openblas' }}
@ -207,6 +206,7 @@ jobs:
$msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim()))
$lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe')
& $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll
- name: Build
id: cmake_build
env:
@ -214,7 +214,7 @@ jobs:
run: |
mkdir build
cd build
cmake .. ${{ matrix.defines }}
cmake .. ${{ matrix.defines }} -DBoost_LIBRARYDIRS=${{ steps.install-boost.outputs.BOOST_ROOT }}/lib
cmake --build . --config Release --target turbopilot
# - name: Add libopenblas.dll


@ -16,9 +16,52 @@ jobs:
strategy:
matrix:
config:
- {tag: "", dockerfile: "./Dockerfile.default", platforms: "linux/amd64,linux/arm64"}
- {tag: "-cuda11", dockerfile: "./Dockerfile.cuda11", platforms: "linux/amd64"}
- {tag: "-cuda12", dockerfile: "./Dockerfile.cuda12", platforms: "linux/amd64"}
- tag:
dockerfile: ./Dockerfile.default
platforms: linux/amd64,linux/arm64
build_base: ubuntu:22.04
runtime_base: ubuntu:22.04
- tag: -openblas
dockerfile: ./Dockerfile.default
platforms: linux/amd64,linux/arm64
build_base: ubuntu:22.04
runtime_base: ubuntu:22.04
extra_deps: libopenblas-dev
cmake_args: -DGGML_OPENBLAS=On
- tag: -cuda11-7
dockerfile: ./Dockerfile.default
platforms: linux/amd64
build_base: nvidia/cuda:11.7.1-devel-ubuntu22.04
runtime_base: nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04
cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
- tag: -cuda12-0
dockerfile: ./Dockerfile.default
platforms: linux/amd64
build_base: nvidia/cuda:12.0.0-devel-ubuntu22.04
runtime_base: nvidia/cuda:12.0.0-runtime-ubuntu22.04
cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
- tag: -cuda12-2
dockerfile: ./Dockerfile.default
platforms: linux/amd64
build_base: nvidia/cuda:12.2.0-devel-ubuntu22.04
runtime_base: nvidia/cuda:12.2.0-runtime-ubuntu22.04
cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
# - tag: -clblast
# dockerfile: ./Dockerfile.default
# platforms: linux/amd64
# build_base: ubuntu:22.04
# runtime_base: ubuntu:22.04
# runtime_deps: libclblast1
# extra_deps: libclblast-dev
# cmake_args: -DGGML_CLBLAST=On
steps:
@ -45,7 +88,7 @@ jobs:
password: ${{ secrets.GH_TOKEN }}
- name: Build and push incremental
uses: docker/build-push-action@v4
uses: docker/build-push-action@v4.1.1
if: (!startsWith(github.ref, 'refs/tags/'))
with:
file: ${{matrix.config.dockerfile}}
@ -53,6 +96,12 @@ jobs:
tags: ghcr.io/ravenscroftj/turbopilot:nightly${{matrix.config.tag}}-${{ github.sha }}
context: ${{github.workspace}}
platforms: ${{matrix.config.platforms}}
build-args: |
EXTRA_DEPS=${{matrix.config.extra_deps}}
CMAKE_ARGS=${{matrix.config.cmake_args}}
BUILD_BASE=${{matrix.config.build_base}}
RUNTIME_BASE=${{matrix.config.runtime_base}}
RUNTIME_DEPS=${{matrix.config.runtime_deps}}
- name: Build and push release (Main Latest Build)
@ -64,6 +113,11 @@ jobs:
tags: ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}, ghcr.io/ravenscroftj/turbopilot:latest
context: ${{github.workspace}}
platforms: ${{matrix.config.platforms}}
build-args: |
EXTRA_DEPS="${{matrix.config.extra_deps}}"
CMAKE_ARGS="${{matrix.config.cmake_args}}"
BUILD_BASE="${{matrix.config.build_base}}"
RUNTIME_BASE="${{matrix.config.runtime_base}}"
- name: Build and push release (Accelerated Builds)
@ -75,3 +129,8 @@ jobs:
tags: ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}${{matrix.config.tag}}
context: ${{github.workspace}}
platforms: ${{matrix.config.platforms}}
build-args: |
EXTRA_DEPS="${{matrix.config.extra_deps}}"
CMAKE_ARGS="${{matrix.config.cmake_args}}"
BUILD_BASE="${{matrix.config.build_base}}"
RUNTIME_BASE="${{matrix.config.runtime_base}}"
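If the release job runs for a git tag, each matrix entry above should publish its own image variant following the ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}${{matrix.config.tag}} pattern. A hypothetical pull of the CUDA 12.2 variant, assuming a v1.0.0 release tag (the version number is illustrative and not part of this changeset):

docker pull ghcr.io/ravenscroftj/turbopilot:v1.0.0-cuda12-2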

2
.gitignore vendored Normal file

@ -0,0 +1,2 @@
build/
models/

5
.gitmodules vendored

@ -1,9 +1,12 @@
[submodule "ggml"]
path = extern/ggml
url = git@github.com:ravenscroftj/ggml.git
url = git@github.com:ggerganov/ggml.git
[submodule "extern/argparse"]
path = extern/argparse
url = https://github.com/p-ranav/argparse.git
[submodule "extern/sbdlog"]
path = extern/spdlog
url = https://github.com/gabime/spdlog.git
[submodule "extern/ggml"]
path = extern/ggml
url = https://github.com/ggerganov/ggml

20
.vscode/c_cpp_properties.json vendored Normal file

@ -0,0 +1,20 @@
{
"configurations": [
{
"name": "Linux",
"includePath": [
"${workspaceFolder}/**",
"${workspaceFolder}/extern/crow/include",
"${workspaceFolder}/include",
"${workspaceFolder}/include"
],
"defines": [],
"compilerPath": "/usr/bin/gcc",
"cStandard": "c17",
"cppStandard": "gnu++17",
"intelliSenseMode": "linux-gcc-x64",
"configurationProvider": "ms-vscode.cmake-tools"
}
],
"version": 4
}

70
.vscode/launch.json vendored Normal file

@ -0,0 +1,70 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "(gdb) Launch TBP",
"type": "cppdbg",
"request": "launch",
"program": "/home/james/workspace/rafael-llm/turbopilot/build/bin/turbopilot",
"args": [
//TBP ARGS
"-v",
"-f",
"/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
"-m",
"replit",
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Set Disassembly Flavor to Intel",
"text": "-gdb-set disassembly-flavor intel",
"ignoreFailures": true
}
]
},
{
"name": "(gdb) Launch Replut",
"type": "cppdbg",
"request": "launch",
"program": "/home/james/workspace/rafael-llm/turbopilot/extern/ggml/build/bin/replit",
"args": [
// REPLIT ARGS
"-m",
"/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
"-f",
"/home/james/workspace/rafael-llm/turbopilot/test.txt"
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Set Disassembly Flavor to Intel",
"text": "-gdb-set disassembly-flavor intel",
"ignoreFailures": true
}
]
},
]
}

28
.vscode/tasks.json vendored Normal file

@ -0,0 +1,28 @@
{
"tasks": [
{
"type": "cppbuild",
"label": "C/C++: g++ build active file",
"command": "/usr/bin/g++",
"args": [
"-fdiagnostics-color=always",
"-g",
"${file}",
"-o",
"${fileDirname}/${fileBasenameNoExtension}"
],
"options": {
"cwd": "${fileDirname}"
},
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"detail": "Task generated by Debugger."
}
],
"version": "2.0.0"
}


@ -1,5 +1,15 @@
cmake_minimum_required (VERSION 3.0)
project(turbopilot VERSION 0.1.0)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED true)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED true)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
# option(BUILD_SHARED_LIBS "Build using shared libraries" OFF)
set(CMAKE_EXPORT_COMPILE_COMMANDS "on")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
@ -32,6 +42,9 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES
endif()
add_subdirectory(extern/ggml)
add_subdirectory(extern/argparse)
add_subdirectory(extern/spdlog)


@ -1,39 +0,0 @@
FROM nvidia/cuda:11.7.1-devel-ubuntu22.04 AS build
ENV DEBIAN_FRONTEND=noninteractive
# inlude kitware apt repo to allow us to grab latest cmake
RUN apt-get update && apt-get install ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null
RUN apt-get update && apt-get install -y build-essential cmake libboost-dev
ADD ./ /turbopilot
RUN mkdir /turbopilot/build
WORKDIR /turbopilot/build
RUN cmake -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc ..
RUN make turbopilot
FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04 AS runtime
WORKDIR /app
COPY --from=build /turbopilot/build/bin/turbopilot /app/turbopilot
ENV THREADS=4
ENV MODEL="/models/codegen-2B-multi-ggml-4bit-quant.bin"
ENV BATCHSIZE=64
COPY ./run.sh /app/
EXPOSE 18080
CMD /app/run.sh


@ -1,37 +0,0 @@
FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 AS build
ENV DEBIAN_FRONTEND=noninteractive
# inlude kitware apt repo to allow us to grab latest cmake
RUN apt-get update && apt-get install ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ focal main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null
RUN apt-get update && apt-get install -y build-essential cmake libboost-dev
ADD ./ /turbopilot
RUN mkdir /turbopilot/build
WORKDIR /turbopilot/build
RUN cmake -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc ..
RUN make turbopilot
FROM nvidia/cuda:12.2.0-runtime-ubuntu20.04 AS runtime
WORKDIR /app
COPY --from=build /turbopilot/build/bin/turbopilot /app/turbopilot
ENV THREADS=4
ENV MODEL="/models/codegen-2B-multi-ggml-4bit-quant.bin"
ENV BATCHSIZE=64
COPY ./run.sh /app/
EXPOSE 18080
CMD /app/run.sh


@ -1,17 +1,37 @@
FROM alpine AS build
ARG BUILD_BASE="ubuntu:22.04"
ARG RUNTIME_BASE="ubuntu:22.04"
RUN apk add --update alpine-sdk boost-dev cmake asio-dev
FROM ${BUILD_BASE} AS build
ADD ./ /turbopilot/
ARG EXTRA_DEPS=""
ARG CMAKE_ARGS=""
RUN echo "CMAKE_ARGS: ${CMAKE_ARGS}"
RUN echo "EXTRA_DEPS: ${EXTRA_DEPS}"
ENV DEBIAN_FRONTEND=noninteractive
# include kitware apt repo to allow us to grab latest cmake
RUN apt-get update && apt-get install -y ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null
RUN apt-get update && apt-get install -y build-essential cmake libboost-dev libboost-thread-dev ${EXTRA_DEPS}
ADD ./ /turbopilot
RUN mkdir /turbopilot/build
WORKDIR /turbopilot/build
RUN cmake -D GGML_STATIC=ON ..
RUN cmake .. ${CMAKE_ARGS}
RUN make turbopilot
FROM alpine AS runtime
FROM ${RUNTIME_BASE} AS runtime
ARG RUNTIME_DEPS=""
RUN if [[ -z "${RUNTIME_DEPS}" ]] ; then echo "No runtime libs required" ; else apt-get update && apt-get install -y ${RUNTIME_DEPS} ; fi
WORKDIR /app
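The parameterised Dockerfile above can also be driven by hand; a rough local sketch of the cuda11-7 matrix entry, reusing the ARG names defined in this Dockerfile with values copied from the workflow matrix (the output tag name is illustrative):

docker build -f Dockerfile.default \
  --build-arg BUILD_BASE=nvidia/cuda:11.7.1-devel-ubuntu22.04 \
  --build-arg RUNTIME_BASE=nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04 \
  --build-arg CMAKE_ARGS="-DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc" \
  -t turbopilot:cuda11-local .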


@ -6,9 +6,9 @@
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|---------------------|-----------------|-----------------|-----------------|
| StarCoder | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
| StableCode | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/resolve/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
To run in Turbopilot set model type `-m stablecode`
## "Coder" family models
@ -23,7 +23,7 @@ This model is primarily trained on Python, Java and Javscript.
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|---------------------|-----------------|-----------------|-----------------|
| StarCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
| SantaCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
To run in Turbopilot set model type `-m starcoder`
@ -39,7 +39,7 @@ Even when quantized, WizardCoder is a large model that takes up a significant am
|---------------------|-----------------|-----------------|-----------------|
| WizardCoder | ~12GiB | [:arrow_down:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/resolve/main/WizardCoder-15B-1.0.ggmlv3.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/) |
To run in Turbopilot set model type `-m starcoder`
To run in Turbopilot set model type `-m wizardcoder`
### StarCoder (Released 4/5/2023)
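With the model types corrected in this file, a hypothetical invocation for the WizardCoder entry would be as follows (file name taken from the download link above, thread count assumed):

./turbopilot -m wizardcoder -f ./WizardCoder-15B-1.0.ggmlv3.q4_0.bin -t 8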

2
extern/ggml vendored

@ -1 +1 @@
Subproject commit f6365c0605ac86c6ab106cda0e8d6650e54097a7
Subproject commit 1a5d5f331de1d3c7ace40d86fe2373021a42f9ce


@ -71,7 +71,7 @@ public:
}
virtual ~GPTJModel();
bool load_model(std::string path);
virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
private:
gptj_model *model = NULL;


@ -75,7 +75,7 @@ public:
}
virtual ~GPTNEOXModel();
bool load_model(std::string path);
virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
private:
gpt_neox_model *model = NULL;


@ -7,6 +7,7 @@
#include <map>
#include <vector>
#include <random>
#include <mutex>
typedef void (*offload_func_t)(struct ggml_tensor * tensor);
void ggml_nop(struct ggml_tensor * tensor);
@ -55,11 +56,16 @@ public:
rng(rng)
{}
virtual bool load_model(std::string model_path) = 0;
virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt) = 0;
std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
void lock();
void unlock();
protected:
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt) = 0;
ModelConfig config;
std::mt19937 &rng;
std::mutex model_lock;
};


@ -2,6 +2,8 @@
#define __TURBOPILOT_SERVER_H
#include <spdlog/spdlog.h>
#include "turbopilot/model.hpp"
#include "crow_all.h"
@ -10,6 +12,46 @@ crow::response handle_openai_request(TurbopilotModel *model, const crow::request
crow::response handle_hf_request(TurbopilotModel *model, const crow::request& req);
class TBPLogger : public crow::ILogHandler {
public:
TBPLogger() {}
void log(std::string message, crow::LogLevel crow_level) {
// "message" doesn't contain the timestamp and loglevel
// prefix the default logger does and it doesn't end
// in a newline.
spdlog::level::level_enum level = spdlog::level::info;
switch(crow_level){
case crow::LogLevel::Critical:
level = spdlog::level::critical;
break;
case crow::LogLevel::Error:
level = spdlog::level::err;
break;
case crow::LogLevel::Warning:
level = spdlog::level::warn;
break;
case crow::LogLevel::Info:
level = spdlog::level::info;
break;
case crow::LogLevel::Debug:
level = spdlog::level::debug;
break;
default:
// if case is not a known value, assume the worst
level = spdlog::level::critical;
}
spdlog::log(level, message);
}
};
#endif // __TURBOPILOT_SERVER_H


@ -68,7 +68,7 @@ public:
}
virtual ~StarcoderModel();
bool load_model(std::string path);
virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
private:
starcoder_model *model = NULL;

7
run.sh

@ -1,3 +1,6 @@
#!/bin/sh
/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
if [ -z "$GPU_LAYERS" ]; then
/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
else
/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL --ngl $GPU_LAYERS
fi
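The updated script only appends --ngl when GPU_LAYERS is set, so GPU offload stays opt-in. A sketch of how a container might be started with it (image tag, mount path and layer count are assumptions; a CUDA-enabled image is needed for the layers to actually offload):

docker run --gpus all -p 18080:18080 \
  -v "$(pwd)/models:/models" \
  -e MODEL_TYPE=codegen \
  -e MODEL=/models/codegen-2B-multi-ggml-4bit-quant.bin \
  -e THREADS=4 \
  -e GPU_LAYERS=20 \
  ghcr.io/ravenscroftj/turbopilot:v1.0.0-cuda11-7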


@ -1,6 +1,7 @@
set(TURBOPILOT_TARGET turbopilot)
find_package(Boost REQUIRED)
find_package(Boost COMPONENTS thread system REQUIRED)
include_directories(${Boost_INCLUDE_DIRS})
add_executable(${TURBOPILOT_TARGET}
@ -16,6 +17,9 @@ add_executable(${TURBOPILOT_TARGET}
../include/turbopilot/starcoder.hpp
)
#set(THREADS_PREFER_PTHREAD_FLAG TRUE)
#find_package(Threads REQUIRED)
target_include_directories(${TURBOPILOT_TARGET} PRIVATE
../include
@ -23,8 +27,6 @@ target_include_directories(${TURBOPILOT_TARGET} PRIVATE
../extern/crow/include
)
#target_compile_features(${TURBOPILOT_TARGET} PRIVATE cxx_std_11)
target_link_libraries(${TURBOPILOT_TARGET} PRIVATE ggml argparse)
#target_link_libraries(${TURBOPILOT_TARGET} PRIVATE spdlog::spdlog_header_only)
target_link_libraries(${TURBOPILOT_TARGET} PRIVATE ggml argparse)
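For a local build against these CMake changes, the configure and build steps mirror what the CI workflow and Dockerfile run; the CUDA flags are only needed for a cuBLAS build and are copied from the workflow matrix:

mkdir build && cd build
cmake .. -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
cmake --build . --config Release --target turbopilot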


@ -4,6 +4,22 @@
#include <cmath>
#include <random>
void TurbopilotModel::lock(){
this->model_lock.lock();
}
void TurbopilotModel::unlock(){
this->model_lock.unlock();
}
std::stringstream TurbopilotModel::predict(std::string prompt, int max_length, bool include_prompt){
lock();
auto result = predict_impl(prompt, max_length, include_prompt);
unlock();
return result;
}
void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
(void) tensor;
}
@ -163,4 +179,6 @@ gpt_vocab::id gpt_sample_top_k_top_p(
int idx = dist(rng);
return logits_id[idx].second;
}
}


@ -6,6 +6,14 @@
#include <iostream>
#include <fstream>
#ifdef GGML_USE_CLBLAST
#include "ggml-opencl.h"
#endif
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#endif
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
@ -455,6 +463,9 @@ bool GPTJModel::load_model(std::string fname) {
}
}
// key + value memory
{
const auto & hparams = model->hparams;
@ -553,10 +564,51 @@ bool GPTJModel::load_model(std::string fname) {
fin.close();
#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
if(config.n_gpu_layers > 0){
size_t vram_total = 0;
int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
for(int i=0; i < gpu_layers; i++) {
const auto & layer = model->layers[i];
layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_k_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_v_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
#if defined(GGML_USE_CLBLAST)
ggml_cl_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
ggml_cl_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
ggml_cl_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#else
ggml_cuda_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#endif
}
spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
}
#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
return true;
}
std::stringstream GPTJModel::predict(std::string prompt, int max_length, bool include_prompt) {
std::stringstream GPTJModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
std::stringstream result;
// tokenize the prompt


@ -99,6 +99,7 @@ bool gpt_neox_eval(
const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
//printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
// reallocate
buf_size = buf_size_new;
buf = realloc(buf, buf_size);
@ -106,6 +107,8 @@ bool gpt_neox_eval(
fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
return false;
}
spdlog::debug("{}: reallocating context buffer {} -> now {} bytes of tokens in prompt = {}", __func__, buf_size, buf_size_new);
}
struct ggml_init_params params = {
@ -291,6 +294,7 @@ bool gpt_neox_eval(
// ggml_graph_print (&gf);
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
//}
//embd_w.resize(n_vocab*N);
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
@ -301,7 +305,9 @@ bool gpt_neox_eval(
if (mem_per_token == 0) {
mem_per_token = ggml_used_mem(ctx0)/N;
}
spdlog::debug("used_mem = {}\n", ggml_used_mem(ctx0));
//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
ggml_free(ctx0);
@ -618,8 +624,6 @@ bool GPTNEOXModel::load_model(std::string fname) {
#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
printf("inside ggml clblast check\n");
if(config.n_gpu_layers > 0){
size_t vram_total = 0;
@ -647,7 +651,7 @@ bool GPTNEOXModel::load_model(std::string fname) {
#endif
}
fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
}
#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
@ -655,7 +659,7 @@ bool GPTNEOXModel::load_model(std::string fname) {
return true;
}
std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool include_prompt) {
std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
std::stringstream result;
// tokenize the prompt
@ -674,6 +678,8 @@ std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool
std::vector<gpt_vocab::id> embd;
// determine the required inference memory per token:
size_t mem_per_token = 0;
@ -760,3 +766,4 @@ std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool
return result;
}


@ -67,6 +67,11 @@ int main(int argc, char **argv)
.default_value(0.1f)
.scan<'g', float>();
program.add_argument("-b", "--batch-size")
.help("set batch size for model completion")
.default_value(512)
.scan<'i',int>();
program.add_argument("prompt").remaining();
@ -103,6 +108,7 @@ int main(int argc, char **argv)
config.n_threads = program.get<int>("--threads");
config.temp = program.get<float>("--temperature");
config.top_p = program.get<float>("--top-p");
config.n_batch = program.get<int>("--batch-size");
config.n_gpu_layers = program.get<int>("--ngl");
if(model_type.compare("codegen") == 0) {
@ -129,16 +135,22 @@ int main(int argc, char **argv)
}
t_load_us = ggml_time_us() - t_start_us;
spdlog::info("Loaded model in {:0.2f}ms", t_load_us/1000.0f);
crow::SimpleApp app;
TBPLogger logger;
crow::logger::setHandler(&logger);
CROW_ROUTE(app, "/")([](){
return "Hello world";
});
CROW_ROUTE(app, "/copilot_internal/v2/token")([](){
//return "Hello world";
@ -176,6 +188,8 @@ int main(int argc, char **argv)
app.port(program.get<int>("--port")).multithreaded().run();
free(model);
}
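With the new argument wired into the config, both the batch size and the GPU layer count can now be set from the command line; a hypothetical run combining them (model file, thread count and values are placeholders):

./turbopilot -m starcoder -f ./santacoder-q4_0.bin -t 6 -b 256 --ngl 20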


@ -37,7 +37,6 @@ crow::response handle_hf_request(TurbopilotModel *model, const crow::request& re
crow::json::wvalue response = {
{"generated_text", result.str()},
};
crow::response res;


@ -50,13 +50,13 @@ bool starcoder_eval(
if (mem_per_token > 0 && mem_per_token*N > buf_size) {
const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
//printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);
// reallocate
buf_size = buf_size_new;
buf = realloc(buf, buf_size);
if (buf == nullptr) {
fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
spdlog::error("{}: failed to allocate {} bytes\n", __func__, buf_size);
return false;
}
}
@ -686,8 +686,6 @@ bool StarcoderModel::load_model(std::string fname) {
#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
printf("inside ggml clblast check\n");
if(config.n_gpu_layers > 0){
size_t vram_total = 0;
int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
@ -714,7 +712,7 @@ bool StarcoderModel::load_model(std::string fname) {
#endif
}
fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
}
#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
@ -724,7 +722,7 @@ bool StarcoderModel::load_model(std::string fname) {
}
std::stringstream StarcoderModel::predict(std::string prompt, int max_length, bool include_prompt) {
std::stringstream StarcoderModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
std::stringstream result;
// tokenize the prompt

10
test.txt Normal file

@ -0,0 +1,10 @@
#%%
import os
import cats

65
test_codegen2.py Normal file

@ -0,0 +1,65 @@
#%%
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen2-1B")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen2-1B", trust_remote_code=True, revision="main")
#%%
model = model.to(device="cuda")
#%%
text = """
import os
def post_to_pastebin"""
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=512)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
# %%
def format_model_input(prefix, suffix):
return prefix + "<mask_1>" + suffix + "<|endoftext|>" + "<sep>" + "<mask_1>"
prefix = """
import os
def post_to_pastebin"""
suffix = "result = post_to_pastebin(content)"
text = format_model_input(prefix, suffix)
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=False))
# %%
def main():
text = """
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
if __name__ == '__main__':
main()
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
# %%
import os
def post_to_pastebin"""
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=512)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
# %%
def post_to_pastebin(content):
input_ids = tokenizer(content, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=512)
return tokenizer.decode(generated_ids[0], skip_special_tokens=True)

45
test_santa.py Normal file

@ -0,0 +1,45 @@
#%%
import torch
from transformers import CodeGenTokenizer, GPTJForCausalLM
checkpoint = "/home/james/workspace/rafael-llm/codegen-2B-multi-gptj"
device = "cuda" # for GPU usage or "cpu" for CPU usage
tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
model = GPTJForCausalLM.from_pretrained(checkpoint).to(device)
#model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)
#%%
# define the user model
class User:
# %%
code = """import os
import requests
#send the json data to pastebin
def send_data"""
inputs = tokenizer.encode(code, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_length=200)
response = tokenizer.decode(outputs[0])
print(response)
import requests
#send the json data to pastebin
def send_data(data):
url = "http://pastebin.com/api_post.php"
data = {"api_dev_key": "<api_key>", "api_user_key": "<user_key>", "api_content": data}
response = requests.post(url, data=data).text
return response
# %%
code
# %%

94
turbopilot.code-workspace Normal file

@ -0,0 +1,94 @@
{
"folders": [
{
"path": "."
},
{
"path": "extern/ggml"
},
{
"path": "../../pymicrocosm"
}
],
"settings": {
"files.associations": {
"array": "cpp",
"atomic": "cpp",
"bit": "cpp",
"*.tcc": "cpp",
"bitset": "cpp",
"cctype": "cpp",
"chrono": "cpp",
"clocale": "cpp",
"cmath": "cpp",
"compare": "cpp",
"concepts": "cpp",
"cstdint": "cpp",
"cstdio": "cpp",
"cstdlib": "cpp",
"cstring": "cpp",
"ctime": "cpp",
"cwchar": "cpp",
"cwctype": "cpp",
"deque": "cpp",
"map": "cpp",
"unordered_map": "cpp",
"vector": "cpp",
"exception": "cpp",
"fstream": "cpp",
"functional": "cpp",
"initializer_list": "cpp",
"iosfwd": "cpp",
"istream": "cpp",
"limits": "cpp",
"memory": "cpp",
"new": "cpp",
"numbers": "cpp",
"numeric": "cpp",
"ostream": "cpp",
"ratio": "cpp",
"regex": "cpp",
"semaphore": "cpp",
"sstream": "cpp",
"stdexcept": "cpp",
"stop_token": "cpp",
"streambuf": "cpp",
"string": "cpp",
"string_view": "cpp",
"system_error": "cpp",
"thread": "cpp",
"type_traits": "cpp",
"tuple": "cpp",
"typeinfo": "cpp",
"utility": "cpp",
"csignal": "cpp",
"cstdarg": "cpp",
"cstddef": "cpp",
"any": "cpp",
"strstream": "cpp",
"charconv": "cpp",
"cinttypes": "cpp",
"codecvt": "cpp",
"complex": "cpp",
"condition_variable": "cpp",
"coroutine": "cpp",
"list": "cpp",
"set": "cpp",
"algorithm": "cpp",
"iterator": "cpp",
"memory_resource": "cpp",
"optional": "cpp",
"random": "cpp",
"source_location": "cpp",
"future": "cpp",
"iomanip": "cpp",
"iostream": "cpp",
"mutex": "cpp",
"span": "cpp",
"cfenv": "cpp",
"typeindex": "cpp",
"variant": "cpp",
"unordered_set": "cpp"
}
}
}