Mirror of https://github.com/ravenscroftj/turbopilot.git (synced 2024-10-01 01:06:01 -04:00)
Compare commits
129 Commits
eacb0d59c2 ... 3577858ef7
@@ -1 +1,4 @@
ggml/build
build
models
assets
.github/workflows/build-commit.yml (vendored): 61 changed lines
@@ -25,18 +25,14 @@ jobs:
- name: Dependencies
id: depends
run: |
brew update
brew install cmake boost
brew install cmake boost asio
- name: Build
id: make_build
run: |
cd ${{github.workspace}}/ggml
cmake -B ${{github.workspace}}/ggml/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
cd ${{github.workspace}}/ggml/build
make codegen codegen-serve codegen-quantize
chmod +x ${{github.workspace}}/ggml/build/bin/codegen
chmod +x ${{github.workspace}}/ggml/build/bin/codegen-serve
chmod +x ${{github.workspace}}/ggml/build/bin/codegen-quantize
cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
cd ${{github.workspace}}/build
make
chmod +x ${{github.workspace}}/build/bin/turbopilot

- uses: benjlevesque/short-sha@v2.2
id: short-sha
@@ -50,22 +46,22 @@ jobs:
# Artifact name
name: turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ steps.short-sha.outputs.sha }} # optional, default is artifact
# A file, directory or wildcard pattern that describes what to upload
path: ${{github.workspace}}/ggml/build/bin/codegen*
path: ${{github.workspace}}/build/bin/turbopilot
# The desired behavior if no files are found using the provided path.

- name: package artifacts for release
if: startsWith(github.ref, 'refs/tags/')

run: |
cd ${{github.workspace}}/ggml/build/bin
zip turbopilot-${{ runner.os }}-${{ runner.arch }}.zip ./codegen*
cd ${{github.workspace}}/build/bin
zip turbopilot-${{ runner.os }}-${{ runner.arch }}.zip ./turbopilot

- name: Upload binaries to release
uses: softprops/action-gh-release@v1
if: startsWith(github.ref, 'refs/tags/')
with:
token: ${{ secrets.PUBLISH_TOKEN }}
files: ${{github.workspace}}/ggml/build/bin/turbopilot-${{ runner.os }}-${{ runner.arch }}.zip
files: ${{github.workspace}}/build/bin/turbopilot-${{ runner.os }}-${{ runner.arch }}.zip

@@ -95,7 +91,7 @@ jobs:
submodules: true

- name: Install Dependencies
run: sudo apt-get update && sudo apt-get install -yq libboost-dev
run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev libboost-thread-dev

- name: Install OpenBlas
if: ${{ matrix.build == 'avx2-openblas' }}
@@ -112,13 +108,10 @@ jobs:
# Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make.
# See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type
run: |
cd ${{github.workspace}}/ggml
cmake -B ${{github.workspace}}/ggml/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} ${{ matrix.defines }}
cd ${{github.workspace}}/ggml/build
make codegen codegen-serve codegen-quantize
chmod +x ${{github.workspace}}/ggml/build/bin/codegen
chmod +x ${{github.workspace}}/ggml/build/bin/codegen-serve
chmod +x ${{github.workspace}}/ggml/build/bin/codegen-quantize
cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} ${{ matrix.defines }}
cd ${{github.workspace}}/build
make
chmod +x ${{github.workspace}}/build/bin/turbopilot

- uses: benjlevesque/short-sha@v2.2
@@ -133,15 +126,15 @@ jobs:
# Artifact name
name: turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}-${{ steps.short-sha.outputs.sha }} # optional, default is artifact
# A file, directory or wildcard pattern that describes what to upload
path: ${{github.workspace}}/ggml/build/bin/codegen*
path: ${{github.workspace}}/build/bin/turbopilot
# The desired behavior if no files are found using the provided path.

- name: package artifacts for release
if: startsWith(github.ref, 'refs/tags/')

run: |
cd ${{github.workspace}}/ggml/build/bin
zip turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip ./codegen*
cd ${{github.workspace}}/build/bin
zip turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip ./turbopilot

- name: Upload binaries to release
@@ -149,7 +142,7 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
token: ${{ secrets.PUBLISH_TOKEN }}
files: ${{github.workspace}}/ggml/build/bin/turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip
files: ${{github.workspace}}/build/bin/turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip

windows-latest-cmake:
runs-on: windows-latest
@@ -213,16 +206,16 @@ jobs:
$msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim()))
$lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe')
& $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll

- name: Build
id: cmake_build
env:
BOOST_ROOT: ${{ steps.install-boost.outputs.BOOST_ROOT }}
run: |
cd ${{github.workspace}}/ggml
mkdir build
cd build
cmake .. ${{ matrix.defines }}
cmake --build . --config Release --target codegen codegen-serve codegen-quantize
cmake .. ${{ matrix.defines }} -DBoost_LIBRARYDIRS=${{ steps.install-boost.outputs.BOOST_ROOT }}/lib
cmake --build . --config Release --target turbopilot

# - name: Add libopenblas.dll
# id: add_libopenblas_dll
@@ -244,11 +237,6 @@ jobs:
& $cl /O2 /GS- /kernel avx512f.c /link /nodefaultlib /entry:main
.\avx512f.exe && echo "AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo "AVX512F: NO"

- uses: benjlevesque/short-sha@v2.2
id: short-sha
with:
length: 6

- name: Upload Build Artifacts
uses: actions/upload-artifact@v3.1.2
@@ -256,15 +244,14 @@ jobs:
# Artifact name
name: turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ steps.short-sha.outputs.sha }}-${{ matrix.build }} # optional, default is artifact
# A file, directory or wildcard pattern that describes what to upload
path: ${{github.workspace}}\\ggml\\build\\bin\\Release\\codegen*
path: ${{github.workspace}}\\build\\bin\\Release\\turbopilot.exe
# The desired behavior if no files are found using the provided path.

- name: package artifacts for release
if: startsWith(github.ref, 'refs/tags/')

run: |
cd ${{github.workspace}}\ggml\build\bin\\Release\
7z a ${{github.workspace}}\ggml\build\bin\Release\turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip ./codegen*
7z a ${{github.workspace}}\build\bin\Release\turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip ${{github.workspace}}\\build\\bin\\Release\\turbopilot.exe

- name: Convert backslashes
@@ -280,4 +267,4 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
token: ${{ secrets.PUBLISH_TOKEN }}
files: ${{ steps.convert_backslashes.outputs.converted_path }}/ggml/build/bin/Release/turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip
files: ${{ steps.convert_backslashes.outputs.converted_path }}/build/bin/Release/turbopilot-${{ runner.os }}-${{ runner.arch }}-${{ matrix.build }}.zip
.github/workflows/docker-image.yml (vendored): 80 changed lines
@@ -16,8 +16,52 @@ jobs:
strategy:
matrix:
config:
- {tag: "", dockerfile: "./Dockerfile.default"}
- {tag: "-cuda", dockerfile: "./Dockerfile.cuda"}
- tag:
dockerfile: ./Dockerfile.default
platforms: linux/amd64,linux/arm64
build_base: ubuntu:22.04
runtime_base: ubuntu:22.04

- tag: -openblas
dockerfile: ./Dockerfile.default
platforms: linux/amd64,linux/arm64
build_base: ubuntu:22.04
runtime_base: ubuntu:22.04
extra_deps: libopenblas-dev
cmake_args: -DGGML_OPENBLAS=On

- tag: -cuda11-7
dockerfile: ./Dockerfile.default
platforms: linux/amd64
build_base: nvidia/cuda:11.7.1-devel-ubuntu22.04
runtime_base: nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04
cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc

- tag: -cuda12-0
dockerfile: ./Dockerfile.default
platforms: linux/amd64
build_base: nvidia/cuda:12.0.0-devel-ubuntu22.04
runtime_base: nvidia/cuda:12.0.0-runtime-ubuntu22.04
cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc

- tag: -cuda12-2
dockerfile: ./Dockerfile.default
platforms: linux/amd64
build_base: nvidia/cuda:12.2.0-devel-ubuntu22.04
runtime_base: nvidia/cuda:12.2.0-runtime-ubuntu22.04
cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc

# - tag: -clblast
# dockerfile: ./Dockerfile.default
# platforms: linux/amd64
# build_base: ubuntu:22.04
# runtime_base: ubuntu:22.04
# runtime_deps: libclblast1
# extra_deps: libclblast-dev
# cmake_args: -DGGML_CLBLAST=On

steps:

@@ -44,17 +88,23 @@ jobs:
password: ${{ secrets.GH_TOKEN }}

- name: Build and push incremental
uses: docker/build-push-action@v4
uses: docker/build-push-action@v4.1.1
if: (!startsWith(github.ref, 'refs/tags/'))
with:
file: ${{matrix.config.dockerfile}}
push: true
tags: ghcr.io/ravenscroftj/turbopilot:nightly${{matrix.config.tag}}-${{ github.sha }}
context: ${{github.workspace}}
platforms: linux/amd64,linux/arm64
platforms: ${{matrix.config.platforms}}
build-args: |
EXTRA_DEPS=${{matrix.config.extra_deps}}
CMAKE_ARGS=${{matrix.config.cmake_args}}
BUILD_BASE=${{matrix.config.build_base}}
RUNTIME_BASE=${{matrix.config.runtime_base}}
RUNTIME_DEPS=${{matrix.config.runtime_deps}}

- name: Build and push release
- name: Build and push release (Main Latest Build)
uses: docker/build-push-action@v4
if: startsWith(github.ref, 'refs/tags/') && matrix.config.tag == ''
with:
@@ -62,15 +112,27 @@ jobs:
push: true
tags: ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}, ghcr.io/ravenscroftj/turbopilot:latest
context: ${{github.workspace}}
platforms: linux/amd64,linux/arm64
platforms: ${{matrix.config.platforms}}
build-args: |
EXTRA_DEPS=${{matrix.config.extra_deps}}
CMAKE_ARGS=${{matrix.config.cmake_args}}
BUILD_BASE=${{matrix.config.build_base}}
RUNTIME_BASE=${{matrix.config.runtime_base}}
RUNTIME_DEPS=${{matrix.config.runtime_deps}}

- name: Build and push release (CUDA)
- name: Build and push release (Accelerated Builds)
uses: docker/build-push-action@v4
if: startsWith(github.ref, 'refs/tags/') && matrix.config.tag != ''
with:
file: ${{matrix.config.dockerfile}}
push: true
tags: ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}
tags: ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}${{matrix.config.tag}}
context: ${{github.workspace}}
platforms: linux/amd64,linux/arm64
platforms: ${{matrix.config.platforms}}
build-args: |
EXTRA_DEPS=${{matrix.config.extra_deps}}
CMAKE_ARGS=${{matrix.config.cmake_args}}
BUILD_BASE=${{matrix.config.build_base}}
RUNTIME_BASE=${{matrix.config.runtime_base}}
RUNTIME_DEPS=${{matrix.config.runtime_deps}}
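The images produced by this matrix are tagged nightly<variant>-<commit sha> for incremental builds and <release tag><variant> for releases. As an illustration (tag names inferred from the workflow and the README below, not guaranteed to exist for every version):

```bash
docker pull ghcr.io/ravenscroftj/turbopilot:latest           # default release build
docker pull ghcr.io/ravenscroftj/turbopilot:v0.2.0-cuda12-0  # accelerated release variant
```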
.gitignore (vendored, new file): 2 lines
@@ -0,0 +1,2 @@
build/
models/
.gitmodules (vendored): 13 changed lines
@@ -1,3 +1,12 @@
[submodule "ggml"]
path = ggml
url = git@github.com:ravenscroftj/ggml.git
path = extern/ggml
url = git@github.com:ggerganov/ggml.git
[submodule "extern/argparse"]
path = extern/argparse
url = https://github.com/p-ranav/argparse.git
[submodule "extern/sbdlog"]
path = extern/spdlog
url = https://github.com/gabime/spdlog.git
[submodule "extern/ggml"]
path = extern/ggml
url = https://github.com/ggerganov/ggml
.vscode/c_cpp_properties.json (vendored, new file): 20 lines
@ -0,0 +1,20 @@
|
||||
{
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Linux",
|
||||
"includePath": [
|
||||
"${workspaceFolder}/**",
|
||||
"${workspaceFolder}/extern/crow/include",
|
||||
"${workspaceFolder}/include",
|
||||
"${workspaceFolder}/include"
|
||||
],
|
||||
"defines": [],
|
||||
"compilerPath": "/usr/bin/gcc",
|
||||
"cStandard": "c17",
|
||||
"cppStandard": "gnu++17",
|
||||
"intelliSenseMode": "linux-gcc-x64",
|
||||
"configurationProvider": "ms-vscode.cmake-tools"
|
||||
}
|
||||
],
|
||||
"version": 4
|
||||
}
|
.vscode/launch.json (vendored, new file): 70 lines
@ -0,0 +1,70 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "(gdb) Launch TBP",
|
||||
"type": "cppdbg",
|
||||
"request": "launch",
|
||||
"program": "/home/james/workspace/rafael-llm/turbopilot/build/bin/turbopilot",
|
||||
"args": [
|
||||
//TBP ARGS
|
||||
"-v",
|
||||
"-f",
|
||||
"/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
|
||||
"-m",
|
||||
"replit",
|
||||
],
|
||||
"stopAtEntry": false,
|
||||
"cwd": "${workspaceFolder}",
|
||||
"environment": [],
|
||||
"externalConsole": false,
|
||||
"MIMode": "gdb",
|
||||
"setupCommands": [
|
||||
{
|
||||
"description": "Enable pretty-printing for gdb",
|
||||
"text": "-enable-pretty-printing",
|
||||
"ignoreFailures": true
|
||||
},
|
||||
{
|
||||
"description": "Set Disassembly Flavor to Intel",
|
||||
"text": "-gdb-set disassembly-flavor intel",
|
||||
"ignoreFailures": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "(gdb) Launch Replut",
|
||||
"type": "cppdbg",
|
||||
"request": "launch",
|
||||
"program": "/home/james/workspace/rafael-llm/turbopilot/extern/ggml/build/bin/replit",
|
||||
"args": [
|
||||
// REPLIT ARGS
|
||||
"-m",
|
||||
"/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
|
||||
"-f",
|
||||
"/home/james/workspace/rafael-llm/turbopilot/test.txt"
|
||||
],
|
||||
"stopAtEntry": false,
|
||||
"cwd": "${workspaceFolder}",
|
||||
"environment": [],
|
||||
"externalConsole": false,
|
||||
"MIMode": "gdb",
|
||||
"setupCommands": [
|
||||
{
|
||||
"description": "Enable pretty-printing for gdb",
|
||||
"text": "-enable-pretty-printing",
|
||||
"ignoreFailures": true
|
||||
},
|
||||
{
|
||||
"description": "Set Disassembly Flavor to Intel",
|
||||
"text": "-gdb-set disassembly-flavor intel",
|
||||
"ignoreFailures": true
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
]
|
||||
}
|
.vscode/tasks.json (vendored, new file): 28 lines
@ -0,0 +1,28 @@
|
||||
{
|
||||
"tasks": [
|
||||
{
|
||||
"type": "cppbuild",
|
||||
"label": "C/C++: g++ build active file",
|
||||
"command": "/usr/bin/g++",
|
||||
"args": [
|
||||
"-fdiagnostics-color=always",
|
||||
"-g",
|
||||
"${file}",
|
||||
"-o",
|
||||
"${fileDirname}/${fileBasenameNoExtension}"
|
||||
],
|
||||
"options": {
|
||||
"cwd": "${fileDirname}"
|
||||
},
|
||||
"problemMatcher": [
|
||||
"$gcc"
|
||||
],
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
},
|
||||
"detail": "Task generated by Debugger."
|
||||
}
|
||||
],
|
||||
"version": "2.0.0"
|
||||
}
|
BUILD.md: 14 changed lines
@@ -32,25 +32,19 @@ Make sure the ggml subproject is checked out with `git submodule init` and `git
Configure cmake to build the project with the following:

```bash
mkdir ggml/build
cd ggml/build
mkdir build
cd build
cmake ..
```

If you are running on linux you can optionally compile a static build with `cmake -D CMAKE_EXE_LINKER_FLAGS="-static" ..` which should make your binary portable across different flavours of the OS.

From here you can now build the components that make up turbopilot:
From here you can now build the components that make up turbopilot by running:

```bash
make codegen codegen-quantize codegen-serve
make
```

Where:

- *codegen* is a command line tool for testing out prompts in a lightweight way (a lot like llama.cpp)
- *codegen-serve* is the actual REST server that can be used to connect to VSCode
- *codegen-quantize* is the tool for quantizing models exported by the conversion script. For more details see [Converting and Quantizing The Models](https://github.com/ravenscroftj/turbopilot/wiki/Converting-and-Quantizing-The-Models).
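Putting the steps above together, a minimal end-to-end build on Linux might look like the sketch below (the static linker flag is optional and taken from the note above; everything else mirrors commands already shown elsewhere in this document):

```bash
# sketch: fetch submodules, configure, and build the turbopilot binary
git submodule init && git submodule update
mkdir build && cd build
cmake -D CMAKE_EXE_LINKER_FLAGS="-static" ..   # optional static build on Linux
make
# the resulting binary lands in build/bin
```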

### Building with OpenBLAS
CMakeLists.txt (new file): 73 lines
@@ -0,0 +1,73 @@
cmake_minimum_required (VERSION 3.0)
project(turbopilot VERSION 0.1.0)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED true)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED true)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

# option(BUILD_SHARED_LIBS "Build using shared libraries" OFF)

set(CMAKE_EXPORT_COMPILE_COMMANDS "on")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")

option(GGML_CLBLAST "ggml: use clBLAST" OFF)
option(GGML_CUBLAS "ggml: use cuBLAS" OFF)

if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
message(STATUS "ARM detected")
if (MSVC)
# TODO: arm msvc?
message(STATUS "ARM+MSVC= :( ")
else()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6")
# Raspberry Pi 1, Zero
add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access)
endif()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7")
# Raspberry Pi 2
add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations)
endif()
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8")
# Raspberry Pi 3, 4, Zero 2 (32-bit)
add_compile_options(-mfp16-format=ieee -mno-unaligned-access)
endif()
endif()
endif()

add_subdirectory(extern/ggml)
add_subdirectory(extern/argparse)
add_subdirectory(extern/spdlog)

if (GGML_STATIC)
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
SET(BUILD_SHARED_LIBS OFF)
SET(CMAKE_EXE_LINKER_FLAGS "-static")
endif()

if (GGML_CUBLAS)
cmake_minimum_required(VERSION 3.17)

find_package(CUDAToolkit)
if (CUDAToolkit_FOUND)
add_compile_definitions(GGML_USE_CUBLAS)
else()
message(WARNING "cuBLAS not found")
endif()
endif()

add_subdirectory(src)

set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
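The GGML_CUBLAS option defined above is the same switch the Docker build matrix passes through CMAKE_ARGS. As a rough sketch (the build directory name and Release config are assumptions), enabling it from the command line could look like:

```bash
cmake -B build -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
cmake --build build --config Release --target turbopilot
```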
@@ -1,31 +0,0 @@
FROM nvidia/cuda:11.7.1-devel-ubuntu22.04 AS build

RUN apt-get update && apt-get install -y build-essential cmake libboost-dev

ADD ./ggml /build/ggml

RUN mkdir /build/ggml/build

WORKDIR /build/ggml/build

RUN cmake -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc ..
RUN make codegen-serve

FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04 AS runtime

WORKDIR /app

COPY --from=build /build/ggml/build/bin/codegen-serve /app/codegen-serve

ENV THREADS=4

ENV MODEL="/models/codegen-2B-multi-ggml-4bit-quant.bin"

ENV BATCHSIZE=64

COPY ./run.sh /app/

EXPOSE 18080

CMD /app/run.sh
@@ -1,22 +1,42 @@
FROM alpine AS build
ARG BUILD_BASE="ubuntu:22.04"
ARG RUNTIME_BASE="ubuntu:22.04"

RUN apk add --update alpine-sdk boost-dev cmake
FROM ${BUILD_BASE} AS build

ADD ./ggml /build/ggml
ARG EXTRA_DEPS=""
ARG CMAKE_ARGS=""

RUN mkdir /build/ggml/build
RUN echo "CMAKE_ARGS: ${CMAKE_ARGS}"
RUN echo "EXTRA_DEPS: ${EXTRA_DEPS}"

WORKDIR /build/ggml/build
ENV DEBIAN_FRONTEND=noninteractive

RUN cmake -D GGML_STATIC=ON ..
RUN make codegen-serve
# inlude kitware apt repo to allow us to grab latest cmake
RUN apt-get update && apt-get install -y ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null

FROM alpine AS runtime
RUN apt-get update && apt-get install -y build-essential cmake libboost-dev libboost-thread-dev ${EXTRA_DEPS}

ADD ./ /turbopilot

RUN mkdir /turbopilot/build

WORKDIR /turbopilot/build

RUN cmake .. ${CMAKE_ARGS}
RUN make turbopilot

FROM ${RUNTIME_BASE} AS runtime

ARG RUNTIME_DEPS=""

RUN if [[ -z "${RUNTIME_DEPS}" ]] ; then echo "No runtime libs required" ; else apt-get update && apt-get install -y ${RUNTIME_DEPS} ; fi

WORKDIR /app

COPY --from=build /build/ggml/build/bin/codegen-serve /app/codegen-serve
COPY --from=build /turbopilot/build/bin/turbopilot /app/turbopilot

ENV THREADS=4
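The ARG values consumed above are supplied by the docker-image.yml matrix earlier in this compare. As an illustration (the local image tag is made up; the argument values are copied from the cuda11-7 matrix entry), a manual build of that variant might look like:

```bash
docker build -f Dockerfile.default \
  --build-arg BUILD_BASE="nvidia/cuda:11.7.1-devel-ubuntu22.04" \
  --build-arg RUNTIME_BASE="nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04" \
  --build-arg CMAKE_ARGS="-DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc" \
  -t turbopilot:cuda11-7 .
```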
MODELS.md (new file): 80 lines
@@ -0,0 +1,80 @@
# Models Directory

## StableCode Instruct State-of-the-art for low spec machines (Released 8th August 2023)

[StableCode](https://stability.ai/blog/stablecode-llm-generative-ai-coding) Instruct is a new model from [Stability.ai](https://stability.ai/) which provides reasonable autocomplete suggestions in approx 3GiB of RAM.

| Model Name | RAM Requirement | Direct Download | HF Project Link |
|---------------------|-----------------|-----------------|-----------------|
| StableCode | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/resolve/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |

To run in Turbopilot set model type `-m stablecode`
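For example, an invocation along these lines should work (the file name matches the direct-download link above; adjust the path to wherever you saved the model):

```bash
./turbopilot -m stablecode -f ./models/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin
```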
## "Coder" family models
|
||||
|
||||
WizardCoder, StarCoder and SantaCoder are current "state-of-the-art" autocomplete models
|
||||
|
||||
### SantaCoder (Small Model, Reasonable on lower spec machines - Released 13/4/2023)
|
||||
|
||||
[SantaCoder](https://huggingface.co/bigcode/santacoder) is a smaller version of the StarCoder and WizardCoder family with only 1.1 Billion parameters. The model is trained with fill-in-the-middle objective allowing it to be used to auto-complete function parameters.
|
||||
|
||||
This model is primarily trained on Python, Java and Javscript.
|
||||
|
||||
|
||||
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|
||||
|---------------------|-----------------|-----------------|-----------------|
|
||||
| SantaCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
|
||||
|
||||
To run in Turbopilot set model type `-m starcoder`
|
||||
|
||||
|
||||
### WizardCoder 15B Best Autocomplete Performance, Compute-Hungry (Released 15/6/2023)
|
||||
|
||||
[WizardCoder](https://github.com/nlpxucan/WizardLM/tree/main/WizardCoder) is the current SOTA auto complete model, it is an updated version of StarCoder that achieves 57.1 pass@1 on HumanEval benchmarks (essentially in 57% of cases it correctly solves a given challenge. Read more about how this metric works in the scientific paper [here](https://arxiv.org/pdf/2107.03374.pdf) ).
|
||||
|
||||
Even when quantized, WizardCoder is a large model that takes up a significant amount of RAM.
|
||||
|
||||
|
||||
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|
||||
|---------------------|-----------------|-----------------|-----------------|
|
||||
| WizardCoder | ~12GiB | [:arrow_down:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/resolve/main/WizardCoder-15B-1.0.ggmlv3.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/) |
|
||||
|
||||
To run in Turbopilot set model type `-m wizardcoder`
|
||||
|
||||
|
||||
### StarCoder (Released 4/5/2023)
|
||||
|
||||
[StarCoder](https://huggingface.co/blog/starcoder) held the previous title of state-of-the-art coding model back in May 2023. It is still a reasonably good model by comparison but it is a similar size and has similar RAM and compute requirements to WizardCoder so you may be better off just running that. Links below provided for posterity.
|
||||
|
||||
|
||||
| Model Name | RAM Requirement | Direct Download | HF Project Link |
|
||||
|---------------------|-----------------|------------------|-----------------|
|
||||
| StarCoder | ~12GiB | [:arrow_down:](https://huggingface.co/NeoDim/starcoder-GGML/resolve/main/starcoder-ggml-q4_0.bin) | [:hugs:](https://huggingface.co/NeoDim/starcoder-GGML/) |
|
||||
| StarCoder Plus | ~12GiB | [:arrow_down:](https://huggingface.co/TheBloke/starcoderplus-GGML/resolve/main/starcoderplus.ggmlv3.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/starcoderplus-GGML/) |
|
||||
|
||||
To run in Turbopilot set model type `-m starcoder`
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## CodeGen 1.0
|
||||
|
||||
The CodeGen models were the first models supported by Turbopilot. They perform less well than the newer Wizardcoder/Starcoder/Santacoder variant models.
|
||||
|
||||
|
||||
The `multi` flavour models can provide auto-complete suggestions for `C`, `C++`, `Go`, `Java`, `JavaScript`, and `Python`.
|
||||
|
||||
The `mono` flavour models can provide auto-complete suggestions for `Python` only (but the quality of Python-specific suggestions may be higher).
|
||||
|
||||
Pre-converted and pre-quantized models are available for download from here:
|
||||
|
||||
| Model Name | RAM Requirement | Supported Languages | Direct Download | HF Project Link |
|
||||
|---------------------|-----------------|---------------------------|-----------------|-----------------|
|
||||
| CodeGen 350M multi | ~800MiB | `C`, `C++`, `Go`, `Java`, `JavaScript`, `Python` | [:arrow_down:](https://huggingface.co/ravenscroftj/CodeGen-350M-multi-ggml-quant/resolve/main/codegen-350M-multi-ggml-4bit-quant.bin) | [:hugs:](https://huggingface.co/ravenscroftj/CodeGen-350M-multi-ggml-quant) |
|
||||
| CodeGen 350M mono | ~800MiB | `Python` | [:arrow_down:](https://huggingface.co/Guglielmo/CodeGen-350M-mono-ggml-quant/resolve/main/ggml-model-quant.bin) | [:hugs:](https://huggingface.co/Guglielmo/CodeGen-350M-mono-ggml-quant) |
|
||||
| CodeGen 2B multi | ~4GiB | `C`, `C++`, `Go`, `Java`, `JavaScript`, `Python` | [:arrow_down:](https://huggingface.co/ravenscroftj/CodeGen-2B-multi-ggml-quant/resolve/main/codegen-2B-multi-ggml-4bit-quant_q4_0.bin) | [:hugs:](https://huggingface.co/ravenscroftj/CodeGen-2B-multi-ggml-quant) |
|
||||
| CodeGen 2B mono | ~4GiB | `Python` | [:arrow_down:](https://huggingface.co/Guglielmo/CodeGen-2B-mono-ggml-quant/resolve/main/ggml-model-quant.bin) | [:hugs:](https://huggingface.co/Guglielmo/CodeGen-2B-mono-ggml-quant/) |
|
||||
| CodeGen 6B multi | ~8GiB | `C`, `C++`, `Go`, `Java`, `JavaScript`, `Python` | [:arrow_down:](https://huggingface.co/ravenscroftj/CodeGen-6B-multi-ggml-quant/resolve/main/codegen-6B-multi-ggml-4bit-quant.bin) | [:hugs:](https://huggingface.co/ravenscroftj/CodeGen-6B-multi-ggml-quant) |
|
||||
| CodeGen 6B mono | ~8GiB | `Python` | [:arrow_down:](https://huggingface.co/Guglielmo/CodeGen-6B-mono-ggml-quant/resolve/main/ggml-model-quant.bin) | [:hugs:](https://huggingface.co/Guglielmo/CodeGen-6B-mono-ggml-quant/) |
|
README.md: 55 changed lines
@@ -9,8 +9,11 @@ TurboPilot is a self-hosted [copilot](https://github.com/features/copilot) clone

![a screen recording of turbopilot running through fauxpilot plugin](assets/vscode-status.gif)

**✨ Now Supports [StableCode 3B Instruct](https://huggingface.co/stabilityai/stablecode-instruct-alpha-3b)**: simply use [TheBloke's Quantized GGML models](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML) and set `-m stablecode`.

**NEW:** As of v0.0.5 turbopilot supports cuda inference which greatly accelerates suggestions when working with longer prompts (i.e. longer existing code files).
**✨ New: Refactored + Simplified**: The source code has been improved to make it easier to extend and add new models to Turbopilot. The system now supports multiple flavours of model.

**✨ New: Wizardcoder, Starcoder, Santacoder support** - Turbopilot now supports state of the art local code completion models which provide more programming languages and "fill in the middle" support.

## 🤝 Contributing

@@ -23,6 +26,7 @@ Make a fork, make your changes and then open a [PR](https://github.com/ravenscro

The easiest way to try the project out is to grab the pre-processed models and then run the server in docker.

### Getting The Models

You have 2 options for getting the model
@@ -31,20 +35,11 @@ You have 2 options for getting the model

You can download the pre-converted, pre-quantized models from Huggingface.

The `multi` flavour models can provide auto-complete suggestions for `C`, `C++`, `Go`, `Java`, `JavaScript`, and `Python`.
For low RAM users (4-8 GiB), I recommend [StableCode](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML) and for high power users (16+ GiB RAM, discrete GPU or apple silicon) I recommend [WizardCoder](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/resolve/main/WizardCoder-15B-1.0.ggmlv3.q4_0.bin).

The `mono` flavour models can provide auto-complete suggestions for `Python` only (but the quality of Python-specific suggestions may be higher).
Turbopilot still supports the first generation codegen models from `v0.0.5` and earlier builds, although old models do need to be requantized.

Pre-converted and pre-quantized models are available for download from here:

| Model Name | RAM Requirement | Supported Languages | Direct Download | HF Project Link |
|---------------------|-----------------|---------------------------|-----------------|-----------------|
| CodeGen 350M multi | ~800MiB | `C`, `C++`, `Go`, `Java`, `JavaScript`, `Python` | [:arrow_down:](https://huggingface.co/ravenscroftj/CodeGen-350M-multi-ggml-quant/resolve/main/codegen-350M-multi-ggml-4bit-quant.bin) | [:hugs:](https://huggingface.co/ravenscroftj/CodeGen-350M-multi-ggml-quant) |
| CodeGen 350M mono | ~800MiB | `Python` | [:arrow_down:](https://huggingface.co/Guglielmo/CodeGen-350M-mono-ggml-quant/resolve/main/ggml-model-quant.bin) | [:hugs:](https://huggingface.co/Guglielmo/CodeGen-350M-mono-ggml-quant) |
| CodeGen 2B multi | ~4GiB | `C`, `C++`, `Go`, `Java`, `JavaScript`, `Python` | [:arrow_down:](https://huggingface.co/ravenscroftj/CodeGen-2B-multi-ggml-quant/resolve/main/codegen-2B-multi-ggml-4bit-quant.bin) | [:hugs:](https://huggingface.co/ravenscroftj/CodeGen-2B-multi-ggml-quant) |
| CodeGen 2B mono | ~4GiB | `Python` | [:arrow_down:](https://huggingface.co/Guglielmo/CodeGen-2B-mono-ggml-quant/resolve/main/ggml-model-quant.bin) | [:hugs:](https://huggingface.co/Guglielmo/CodeGen-2B-mono-ggml-quant/) |
| CodeGen 6B multi | ~8GiB | `C`, `C++`, `Go`, `Java`, `JavaScript`, `Python` | [:arrow_down:](https://huggingface.co/ravenscroftj/CodeGen-6B-multi-ggml-quant/resolve/main/codegen-6B-multi-ggml-4bit-quant.bin) | [:hugs:](https://huggingface.co/ravenscroftj/CodeGen-6B-multi-ggml-quant) |
| CodeGen 6B mono | ~8GiB | `Python` | [:arrow_down:](https://huggingface.co/Guglielmo/CodeGen-6B-mono-ggml-quant/resolve/main/ggml-model-quant.bin) | [:hugs:](https://huggingface.co/Guglielmo/CodeGen-6B-mono-ggml-quant/) |
You can find a full catalogue of models in [MODELS.md](MODELS.md).

#### Option B: Convert The Models Yourself - Hard, More Flexible
@@ -58,17 +53,21 @@ Download the [latest binary](https://github.com/ravenscroftj/turbopilot/releases
Run:

```bash
./codegen-serve -m ./models/codegen-6B-multi-ggml-4bit-quant.bin
./turbopilot -m starcoder -f ./models/santacoder-q4_0.bin
```

The application should start a server on port `18080`
The application should start a server on port `18080`. You can change this with the `-p` option, but it is the default port that vscode-fauxpilot tries to connect to, so you probably want to leave it alone unless you are sure you know what you're doing.

If you have a multi-core system you can control how many CPUs are used with the `-t` option - for example, on my AMD Ryzen 5000 which has 6 cores/12 threads I use:

```bash
./codegen-serve -t 6 -m ./models/codegen-6B-multi-ggml-4bit-quant.bin
./turbopilot -t 6 -m starcoder -f ./models/santacoder-q4_0.bin
```

To run the legacy codegen models, just change the model type flag `-m` to `codegen` instead.
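For example, a legacy codegen invocation might look like this (the model file name is one of the re-quantized CodeGen downloads listed in MODELS.md; adjust to your own path):

```bash
./turbopilot -t 6 -m codegen -f ./models/codegen-2B-multi-ggml-4bit-quant_q4_0.bin
```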

**NOTE: Turbopilot 0.1.0 and newer require re-quantized codegen models; old model files from v0.0.5 and earlier need to be converted again. I am working on providing updated quantized codegen models.**

### 📦 Running From Docker

You can also run Turbopilot from the pre-built docker image supplied [here](https://github.com/users/ravenscroftj/packages/container/package/turbopilot)
@@ -79,7 +78,8 @@ You will still need to download the models separately, then you can run:
docker run --rm -it \
-v ./models:/models \
-e THREADS=6 \
-e MODEL="/models/codegen-2B-multi-ggml-4bit-quant.bin" \
-e MODEL_TYPE=starcoder \
-e MODEL="/models/santacoder-q4_0.bin" \
-p 18080:18080 \
ghcr.io/ravenscroftj/turbopilot:latest
```
@@ -92,18 +92,26 @@ As of release v0.0.5 turbocode now supports CUDA inference. In order to run the
docker run --gpus=all --rm -it \
-v ./models:/models \
-e THREADS=6 \
-e MODEL="/models/codegen-2B-multi-ggml-4bit-quant.bin" \
-e MODEL_TYPE=starcoder \
-e MODEL="/models/santacoder-q4_0.bin" \
-e GPU_LAYERS=32 \
-p 18080:18080 \
ghcr.io/ravenscroftj/turbopilot:v0.0.5-cuda
ghcr.io/ravenscroftj/turbopilot:v0.2.0-cuda11-7
```

You will need CUDA 11 or later to run this container. You should be able to see `/app/codegen-serve` listed when you run `nvidia-smi`.
If you have a big enough GPU then setting `GPU_LAYERS` will allow turbopilot to fully offload computation onto your GPU rather than copying data backwards and forwards, dramatically speeding up inference.

Swap `ghcr.io/ravenscroftj/turbopilot:v0.1.0-cuda11` for `ghcr.io/ravenscroftj/turbopilot:v0.2.0-cuda12-0` or `ghcr.io/ravenscroftj/turbopilot:v0.2.0-cuda12-2` if you are using CUDA 12.0 or 12.2 respectively.

You will need CUDA 11 or CUDA 12 or later to run this container. You should be able to see `/app/turbopilot` listed when you run `nvidia-smi`.

#### Executable and CUDA

As of v0.0.5 a CUDA version of the linux executable is available - it requires that libcublas 11 be installed on the machine - I might build ubuntu debs at some point but for now running in docker may be more convenient if you want to use a CUDA GPU.

You can use GPU offloading via the `--ngl` option.
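For instance, a GPU-offloaded run of the standalone binary might look like the following sketch (32 layers mirrors the GPU_LAYERS value used in the Docker example above; model path is illustrative):

```bash
./turbopilot --ngl 32 -m starcoder -f ./models/santacoder-q4_0.bin
```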

### 🌐 Using the API

#### Support for the official Copilot Plugin
@@ -174,12 +182,7 @@ Should get you something like this:

## 👉 Known Limitations

Again I want to set expectations around this being a proof-of-concept project. With that in mind, here are some current known limitations.

As of **v0.0.2**:
- The models can be quite slow - especially the 6B ones. It can take ~30-40s to make suggestions across 4 CPU cores.
- I've only tested the system on Ubuntu 22.04 but I am now supplying ARM docker images and soon I'll be providing ARM binary releases.
- Sometimes suggestions get truncated in nonsensical places - e.g. part way through a variable name or string name. This is due to a hard limit of 2048 on the context length (prompt + suggestion).
- Currently Turbopilot only supports one GPU device at a time (it will not try to make use of multiple devices).

## 👏 Acknowledgements
@@ -171,10 +171,10 @@ for name in list_vars.keys():
print("  Skipping variable: " + name)
continue

n_dims = len(data.shape);
n_dims = len(data.shape)

# ftype == 0 -> float32, ftype == 1 -> float16
ftype_cur = 0;
ftype_cur = 0
if ftype != 0:
if name[-7:] == ".weight" and n_dims == 2:
print("  Converting to float16")
@@ -211,7 +211,7 @@ for name in list_vars.keys():
fout.write(struct.pack("iii", n_dims, len(str), ftype_cur))
for i in range(n_dims):
fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
fout.write(str);
fout.write(str)

# data
data.tofile(fout)
extern/argparse (new vendored submodule)
@@ -0,0 +1 @@
Subproject commit b0930ab0288185815d6dc67af59de7014a6272f7
extern/ggml (new vendored submodule)
@@ -0,0 +1 @@
Subproject commit 1a5d5f331de1d3c7ace40d86fe2373021a42f9ce
extern/spdlog (new vendored submodule)
@@ -0,0 +1 @@
Subproject commit 2ee8bac78e6525a8ad9a9196e65d502ce390d83a
ggml (submodule removed)
@@ -1 +0,0 @@
Subproject commit 6c4fe0ef5e50b76dd2539130c109e12179da0bd2
include/turbopilot/crow_all.h (new file): 12643 lines (diff suppressed because it is too large)
include/turbopilot/gptj.hpp (new file): 83 lines
@ -0,0 +1,83 @@
|
||||
#ifndef __TURBOPILOT_GPTJ_H
|
||||
#define __TURBOPILOT_GPTJ_H
|
||||
|
||||
#include <turbopilot/model.hpp>
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
|
||||
// default hparams (GPT-J 6B)
|
||||
struct gptj_hparams {
|
||||
int32_t n_vocab = 50400;
|
||||
int32_t n_ctx = 2048;
|
||||
int32_t n_embd = 4096;
|
||||
int32_t n_head = 16;
|
||||
int32_t n_layer = 28;
|
||||
int32_t n_rot = 64;
|
||||
int32_t ftype = 1;
|
||||
};
|
||||
|
||||
struct gptj_layer {
|
||||
// normalization
|
||||
struct ggml_tensor * ln_1_g;
|
||||
struct ggml_tensor * ln_1_b;
|
||||
|
||||
// attention
|
||||
struct ggml_tensor * c_attn_q_proj_w;
|
||||
struct ggml_tensor * c_attn_k_proj_w;
|
||||
struct ggml_tensor * c_attn_v_proj_w;
|
||||
|
||||
struct ggml_tensor * c_attn_proj_w;
|
||||
|
||||
// ff
|
||||
struct ggml_tensor * c_mlp_fc_w;
|
||||
struct ggml_tensor * c_mlp_fc_b;
|
||||
|
||||
struct ggml_tensor * c_mlp_proj_w;
|
||||
struct ggml_tensor * c_mlp_proj_b;
|
||||
};
|
||||
|
||||
struct gptj_model {
|
||||
gptj_hparams hparams;
|
||||
|
||||
// normalization
|
||||
struct ggml_tensor * ln_f_g;
|
||||
struct ggml_tensor * ln_f_b;
|
||||
|
||||
struct ggml_tensor * wte; // position embedding
|
||||
|
||||
struct ggml_tensor * lmh_g; // language model head
|
||||
struct ggml_tensor * lmh_b; // language model bias
|
||||
|
||||
std::vector<gptj_layer> layers;
|
||||
|
||||
// key + value memory
|
||||
struct ggml_tensor * memory_k;
|
||||
struct ggml_tensor * memory_v;
|
||||
|
||||
//
|
||||
struct ggml_context * ctx;
|
||||
std::map<std::string, struct ggml_tensor *> tensors;
|
||||
};
|
||||
|
||||
|
||||
|
||||
class GPTJModel : public TurbopilotModel {
|
||||
|
||||
public:
|
||||
GPTJModel(ModelConfig config, std::mt19937 &rng) : TurbopilotModel(config, rng){
|
||||
this->model = new gptj_model{};
|
||||
this->vocab = new gpt_vocab{};
|
||||
}
|
||||
virtual ~GPTJModel();
|
||||
bool load_model(std::string path);
|
||||
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
|
||||
|
||||
private:
|
||||
gptj_model *model = NULL;
|
||||
gpt_vocab *vocab = NULL;
|
||||
|
||||
|
||||
};
|
||||
|
||||
#endif
|
include/turbopilot/gptneox.hpp (new file): 87 lines
@ -0,0 +1,87 @@
|
||||
#ifndef __TURBOPILOT_GPTNEOX_H
|
||||
#define __TURBOPILOT_GPTNEOX_H
|
||||
|
||||
#include <turbopilot/model.hpp>
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
|
||||
// default hparams (StableLM 3B)
|
||||
struct gpt_neox_hparams {
|
||||
int32_t n_vocab = 50257;
|
||||
int32_t n_ctx = 4096;
|
||||
int32_t n_embd = 4096;
|
||||
int32_t n_head = 32;
|
||||
int32_t n_layer = 16;
|
||||
int32_t n_rot = 32; // rotary_pct * (n_embd / n_head)
|
||||
int32_t par_res = 1; // 1 = true, 0 = false
|
||||
int32_t ftype = 1;
|
||||
};
|
||||
|
||||
struct gpt_neox_layer {
|
||||
// pre normalization
|
||||
struct ggml_tensor * ln_1_g;
|
||||
struct ggml_tensor * ln_1_b;
|
||||
|
||||
// attention
|
||||
struct ggml_tensor * c_attn_attn_w;
|
||||
struct ggml_tensor * c_attn_attn_b;
|
||||
|
||||
struct ggml_tensor * c_attn_proj_w;
|
||||
struct ggml_tensor * c_attn_proj_b;
|
||||
|
||||
// post normalization
|
||||
struct ggml_tensor * ln_2_g;
|
||||
struct ggml_tensor * ln_2_b;
|
||||
|
||||
// ff
|
||||
struct ggml_tensor * c_mlp_fc_w;
|
||||
struct ggml_tensor * c_mlp_fc_b;
|
||||
|
||||
struct ggml_tensor * c_mlp_proj_w;
|
||||
struct ggml_tensor * c_mlp_proj_b;
|
||||
};
|
||||
|
||||
struct gpt_neox_model {
|
||||
gpt_neox_hparams hparams;
|
||||
|
||||
// normalization
|
||||
struct ggml_tensor * ln_f_g;
|
||||
struct ggml_tensor * ln_f_b;
|
||||
|
||||
struct ggml_tensor * wte; // position embedding
|
||||
|
||||
struct ggml_tensor * lmh_g; // language model head
|
||||
//struct ggml_tensor * lmh_b; // language model bias
|
||||
|
||||
std::vector<gpt_neox_layer> layers;
|
||||
|
||||
// key + value memory
|
||||
struct ggml_tensor * memory_k;
|
||||
struct ggml_tensor * memory_v;
|
||||
|
||||
//
|
||||
struct ggml_context * ctx;
|
||||
std::map<std::string, struct ggml_tensor *> tensors;
|
||||
};
|
||||
|
||||
|
||||
class GPTNEOXModel : public TurbopilotModel {
|
||||
|
||||
public:
|
||||
GPTNEOXModel(ModelConfig config, std::mt19937 &rng) : TurbopilotModel(config, rng){
|
||||
this->model = new gpt_neox_model{};
|
||||
this->vocab = new gpt_vocab{};
|
||||
}
|
||||
virtual ~GPTNEOXModel();
|
||||
bool load_model(std::string path);
|
||||
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
|
||||
|
||||
private:
|
||||
gpt_neox_model *model = NULL;
|
||||
gpt_vocab *vocab = NULL;
|
||||
|
||||
|
||||
};
|
||||
|
||||
#endif // __TURBOPILOT_GPTNEOX_H
|
include/turbopilot/model.hpp (new file): 72 lines
@ -0,0 +1,72 @@
|
||||
#ifndef __TURBOPILOT_MODEL_H
|
||||
#define __TURBOPILOT_MODEL_H
|
||||
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <string>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <random>
|
||||
#include <mutex>
|
||||
|
||||
typedef void (*offload_func_t)(struct ggml_tensor * tensor);
|
||||
void ggml_nop(struct ggml_tensor * tensor);
|
||||
|
||||
struct gpt_vocab
|
||||
{
|
||||
using id = int32_t;
|
||||
using token = std::string;
|
||||
|
||||
std::map<token, id> token_to_id;
|
||||
std::map<id, token> id_to_token;
|
||||
std::vector<std::string> special_tokens;
|
||||
|
||||
void add_special_token(const std::string &token);
|
||||
};
|
||||
|
||||
std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab &vocab, const std::string &text);
|
||||
|
||||
gpt_vocab::id gpt_sample_top_k_top_p(
|
||||
const gpt_vocab &vocab,
|
||||
const float *logits,
|
||||
int top_k,
|
||||
double top_p,
|
||||
double temp,
|
||||
std::mt19937 &rng);
|
||||
|
||||
struct ModelConfig
|
||||
{
|
||||
int n_threads = 4;
|
||||
int32_t top_k = 40;
|
||||
float top_p = 0.95f;
|
||||
float temp = 0.80f;
|
||||
float repeat_penalty = 1.10f;
|
||||
int32_t seed = -1; // RNG seed
|
||||
int32_t n_ctx = 512; // context size
|
||||
int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
|
||||
int32_t n_gpu_layers = 0;
|
||||
};
|
||||
|
||||
class TurbopilotModel
|
||||
{
|
||||
|
||||
public:
|
||||
TurbopilotModel(ModelConfig config, std::mt19937 &rng) :
|
||||
config(config),
|
||||
rng(rng)
|
||||
{}
|
||||
virtual bool load_model(std::string model_path) = 0;
|
||||
std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
|
||||
void lock();
|
||||
void unlock();
|
||||
|
||||
|
||||
protected:
|
||||
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt) = 0;
|
||||
ModelConfig config;
|
||||
std::mt19937 &rng;
|
||||
std::mutex model_lock;
|
||||
};
|
||||
|
||||
|
||||
#endif //__TURBOPILOT_MODEL_H
|
include/turbopilot/server.hpp (new file): 57 lines
@ -0,0 +1,57 @@
|
||||
#ifndef __TURBOPILOT_SERVER_H
|
||||
#define __TURBOPILOT_SERVER_H
|
||||
|
||||
|
||||
#include <spdlog/spdlog.h>
|
||||
|
||||
#include "turbopilot/model.hpp"
|
||||
|
||||
#include "crow_all.h"
|
||||
|
||||
crow::response handle_openai_request(TurbopilotModel *model, const crow::request& req);
|
||||
|
||||
crow::response handle_hf_request(TurbopilotModel *model, const crow::request& req);
|
||||
|
||||
class TBPLogger : public crow::ILogHandler {
|
||||
public:
|
||||
TBPLogger() {}
|
||||
void log(std::string message, crow::LogLevel crow_level) {
|
||||
// "message" doesn't contain the timestamp and loglevel
|
||||
// prefix the default logger does and it doesn't end
|
||||
// in a newline.
|
||||
|
||||
spdlog::level::level_enum level = spdlog::level::info;
|
||||
|
||||
switch(crow_level){
|
||||
case crow::LogLevel::Critical:
|
||||
level = spdlog::level::critical;
|
||||
break;
|
||||
|
||||
case crow::LogLevel::Error:
|
||||
level = spdlog::level::err;
|
||||
break;
|
||||
|
||||
case crow::LogLevel::Warning:
|
||||
level = spdlog::level::warn;
|
||||
break;
|
||||
|
||||
case crow::LogLevel::Info:
|
||||
level = spdlog::level::info;
|
||||
break;
|
||||
|
||||
case crow::LogLevel::Debug:
|
||||
level = spdlog::level::debug;
|
||||
break;
|
||||
|
||||
default:
|
||||
// if case is not a known value, assume the worst
|
||||
level = spdlog::level::critical;
|
||||
}
|
||||
|
||||
spdlog::log(level, message);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
#endif // __TURBOPILOT_SERVER_H
|
||||
|
include/turbopilot/starcoder.hpp (new file): 79 lines
@ -0,0 +1,79 @@
|
||||
#ifndef __TURBOPILOT_STARCODER_H
|
||||
#define __TURBOPILOT_STARCODER_H
|
||||
|
||||
#include <turbopilot/model.hpp>
|
||||
|
||||
// default hparams (GPT-2 117M)
|
||||
// https://huggingface.co/bigcode/gpt_bigcode-santacoder/blob/main/config.json
|
||||
struct starcoder_hparams {
|
||||
int32_t n_vocab = 49280;
|
||||
int32_t n_ctx = 2048;
|
||||
int32_t n_embd = 2048;
|
||||
int32_t n_head = 16;
|
||||
int32_t n_layer = 24;
|
||||
int32_t ftype = 1;
|
||||
};
|
||||
|
||||
struct starcoder_layer {
|
||||
// normalization
|
||||
struct ggml_tensor * ln_1_g;
|
||||
struct ggml_tensor * ln_1_b;
|
||||
|
||||
struct ggml_tensor * ln_2_g;
|
||||
struct ggml_tensor * ln_2_b;
|
||||
|
||||
// attention
|
||||
struct ggml_tensor * c_attn_attn_w;
|
||||
struct ggml_tensor * c_attn_attn_b;
|
||||
|
||||
struct ggml_tensor * c_attn_proj_w;
|
||||
struct ggml_tensor * c_attn_proj_b;
|
||||
|
||||
// mlp
|
||||
struct ggml_tensor * c_mlp_fc_w;
|
||||
struct ggml_tensor * c_mlp_fc_b;
|
||||
|
||||
struct ggml_tensor * c_mlp_proj_w;
|
||||
struct ggml_tensor * c_mlp_proj_b;
|
||||
};
|
||||
|
||||
struct starcoder_model {
|
||||
starcoder_hparams hparams;
|
||||
|
||||
// normalization
|
||||
struct ggml_tensor * ln_f_g;
|
||||
struct ggml_tensor * ln_f_b;
|
||||
|
||||
struct ggml_tensor * wte; // position embedding
|
||||
struct ggml_tensor * wpe; // token embedding
|
||||
struct ggml_tensor * lm_head; // language model head
|
||||
|
||||
std::vector<starcoder_layer> layers;
|
||||
|
||||
// key + value memory
|
||||
struct ggml_tensor * memory_k;
|
||||
struct ggml_tensor * memory_v;
|
||||
|
||||
//
|
||||
struct ggml_context * ctx;
|
||||
std::map<std::string, struct ggml_tensor *> tensors;
|
||||
};
|
||||
|
||||
|
||||
class StarcoderModel : public TurbopilotModel {
|
||||
public:
|
||||
StarcoderModel(ModelConfig config, std::mt19937 &rng) : TurbopilotModel(config, rng){
|
||||
this->model = new starcoder_model{};
|
||||
this->vocab = new gpt_vocab{};
|
||||
}
|
||||
virtual ~StarcoderModel();
|
||||
bool load_model(std::string path);
|
||||
virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
|
||||
|
||||
private:
|
||||
starcoder_model *model = NULL;
|
||||
gpt_vocab *vocab = NULL;
|
||||
};
|
||||
|
||||
|
||||
#endif //__TURBOPILOT_STARCODER_H
|
run.sh: 7 changed lines
@@ -1,3 +1,6 @@
#!/bin/sh

/app/codegen-serve -t $THREADS -m $MODEL -b $BATCHSIZE
if [ -z "$GPU_LAYERS" ]; then
/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
else
/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL --ngl $GPU_LAYERS
fi
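In other words, the container entrypoint expects THREADS, MODEL_TYPE and MODEL in the environment, plus an optional GPU_LAYERS to enable offloading. A hedged example invocation (paths and values borrowed from the README Docker examples):

```bash
THREADS=6 MODEL_TYPE=starcoder MODEL=/models/santacoder-q4_0.bin GPU_LAYERS=32 /app/run.sh
```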
src/CMakeLists.txt (new file): 32 lines
@@ -0,0 +1,32 @@
set(TURBOPILOT_TARGET turbopilot)

find_package(Boost COMPONENTS thread system REQUIRED)

include_directories(${Boost_INCLUDE_DIRS})

add_executable(${TURBOPILOT_TARGET}
main.cpp
gptj.cpp
gptneox.cpp
common.cpp
server.cpp
starcoder.cpp
../include/turbopilot/model.hpp
../include/turbopilot/gptj.hpp
../include/turbopilot/gptneox.hpp
../include/turbopilot/starcoder.hpp
)

#set(THREADS_PREFER_PTHREAD_FLAG TRUE)
#find_package(Threads REQUIRED)

target_include_directories(${TURBOPILOT_TARGET} PRIVATE
../include
../extern/spdlog/include
../extern/crow/include
)

#target_compile_features(${TURBOPILOT_TARGET} PRIVATE cxx_std_11)

target_link_libraries(${TURBOPILOT_TARGET} PRIVATE ggml argparse)
src/common.cpp (new file): 184 lines
@@ -0,0 +1,184 @@
#include "turbopilot/model.hpp"

#include <regex>
#include <cmath>
#include <random>


void TurbopilotModel::lock(){
    this->model_lock.lock();
}

void TurbopilotModel::unlock(){
    this->model_lock.unlock();
}

std::stringstream TurbopilotModel::predict(std::string prompt, int max_length, bool include_prompt){
    lock();
    auto result = predict_impl(prompt, max_length, include_prompt);
    unlock();
    return result;
}

void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
    (void) tensor;
}

void gpt_vocab::add_special_token(const std::string & token) {
    special_tokens.push_back(token);
}


void gpt_split_words(std::string str, std::vector<std::string>& words) {
    const std::string pattern = R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)";
    const std::regex re(pattern);
    std::smatch m;

    while (std::regex_search(str, m, re)) {
        for (auto x : m) {
            words.push_back(x);
        }
        str = m.suffix();
    }
}

std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text) {
    std::vector<std::string> words;

    // first split the text into words
    {
        std::string str = text;

        // Generate the subpattern from the special_tokens vector if it's not empty
        if (!vocab.special_tokens.empty()) {
            const std::regex escape(R"([\[\\\^\$\.\|\?\*\+\(\)\{\}])");
            std::string special_tokens_subpattern;
            for (const auto & token : vocab.special_tokens) {
                if (!special_tokens_subpattern.empty()) {
                    special_tokens_subpattern += "|";
                }
                special_tokens_subpattern += std::regex_replace(token, escape, R"(\$&)");
            }

            std::regex re(special_tokens_subpattern);
            std::smatch m;
            // Split the text by special tokens.
            while (std::regex_search(str, m, re)) {
                // Split the substrings in-between special tokens into words.
                gpt_split_words(m.prefix(), words);
                // Add matched special tokens as words.
                for (auto x : m) {
                    words.push_back(x);
                }
                str = m.suffix();
            }
            // Remaining text without special tokens will be handled below.
        }

        gpt_split_words(str, words);
    }

    // find the longest token that forms each word in words:
    std::vector<gpt_vocab::id> tokens;
    for (const auto & word : words) {
        for (int i = 0; i < (int) word.size(); ){
            for (int j = word.size() - 1; j >= i; j--){
                auto cand = word.substr(i, j-i+1);
                auto it = vocab.token_to_id.find(cand);
                if (it != vocab.token_to_id.end()){ // word.substr(i, j-i+1) in vocab
                    tokens.push_back(it->second);
                    i = j + 1;
                    break;
                }
                else if (j == i){ // word.substr(i, 1) has no matching
                    fprintf(stderr, "%s: unknown token '%s'\n", __func__, word.substr(i, 1).data());
                    i++;
                }
            }
        }
    }

    return tokens;
}

gpt_vocab::id gpt_sample_top_k_top_p(
        const gpt_vocab & vocab,
        const float * logits,
        int top_k,
        double top_p,
        double temp,
        std::mt19937 & rng) {
    int n_logits = vocab.id_to_token.size();

    std::vector<std::pair<double, gpt_vocab::id>> logits_id;
    logits_id.reserve(n_logits);

    {
        const double scale = 1.0/temp;
        for (int i = 0; i < n_logits; ++i) {
            logits_id.push_back(std::make_pair(logits[i]*scale, i));
        }
    }

    // find the top K tokens
    std::partial_sort(
            logits_id.begin(),
            logits_id.begin() + top_k, logits_id.end(),
            [](const std::pair<double, gpt_vocab::id> & a, const std::pair<double, gpt_vocab::id> & b) {
        return a.first > b.first;
    });

    logits_id.resize(top_k);

    double maxl = -INFINITY;
    for (const auto & kv : logits_id) {
        maxl = std::max(maxl, kv.first);
    }

    // compute probs for the top K tokens
    std::vector<double> probs;
    probs.reserve(logits_id.size());

    double sum = 0.0;
    for (const auto & kv : logits_id) {
        double p = exp(kv.first - maxl);
        probs.push_back(p);
        sum += p;
    }

    // normalize the probs
    for (auto & p : probs) {
        p /= sum;
    }

    if (top_p < 1.0f) {
        double cumsum = 0.0f;
        for (int i = 0; i < top_k; i++) {
            cumsum += probs[i];
            if (cumsum >= top_p) {
                top_k = i + 1;
                probs.resize(top_k);
                logits_id.resize(top_k);
                break;
            }
        }

        cumsum = 1.0/cumsum;
        for (int i = 0; i < (int) probs.size(); i++) {
            probs[i] *= cumsum;
        }
    }

    //printf("\n");
    //for (int i = 0; i < (int) probs.size(); i++) {
    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
    //}
    //exit(0);

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    int idx = dist(rng);

    return logits_id[idx].second;
}
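The two helpers above do all of the prompt handling for every backend in this diff: gpt_tokenize turns the prompt text into vocabulary ids and gpt_sample_top_k_top_p picks the next id from a logits vector. The sketch below is illustrative glue only, not part of the turbopilot sources: eval_logits is a hypothetical stand-in for the backend-specific gptj_eval / gpt_neox_eval calls, and the gpt_vocab type and both helpers are assumed to be declared in turbopilot/model.hpp, as the include list of common.cpp suggests.

#include <turbopilot/model.hpp>

#include <random>
#include <string>
#include <vector>

// Hypothetical placeholder for gptj_eval()/gpt_neox_eval(): must return one score per vocab entry.
std::vector<float> eval_logits(const std::vector<gpt_vocab::id> & context);

// Sketch: generate n_new tokens from `prompt` using the helpers defined in common.cpp.
std::string sample_completion(const gpt_vocab & vocab, const std::string & prompt,
                              int n_new, std::mt19937 & rng) {
    std::vector<gpt_vocab::id> context = gpt_tokenize(vocab, prompt); // prompt -> token ids
    std::string out;
    for (int i = 0; i < n_new; i++) {
        std::vector<float> logits = eval_logits(context);
        // top_k/top_p/temp values here are arbitrary illustration, not the project defaults
        gpt_vocab::id next = gpt_sample_top_k_top_p(
            vocab, logits.data(), /*top_k=*/40, /*top_p=*/0.1, /*temp=*/0.2, rng);
        context.push_back(next);
        out += vocab.id_to_token.at(next); // assumes id_to_token is a standard map of id -> string
    }
    return out;
}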
src/gptj.cpp (new file, 702 lines)
@@ -0,0 +1,702 @@
|
||||
#include <turbopilot/gptj.hpp>
|
||||
#include <spdlog/spdlog.h>
|
||||
|
||||
#include <ggml/ggml.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
|
||||
#ifdef GGML_USE_CLBLAST
|
||||
#include "ggml-opencl.h"
|
||||
#endif
|
||||
#ifdef GGML_USE_CUBLAS
|
||||
#include "ggml-cuda.h"
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||
#endif
|
||||
|
||||
|
||||
// evaluate the transformer
|
||||
//
|
||||
// - model: the model
|
||||
// - n_threads: number of threads to use
|
||||
// - n_past: the context size so far
|
||||
// - embd_inp: the embeddings of the tokens in the context
|
||||
// - embd_w: the predicted logits for the next token
|
||||
//
|
||||
// The GPT-J model requires about 16MB of memory per input token.
|
||||
//
|
||||
bool gptj_eval(
|
||||
const gptj_model & model,
|
||||
const int n_threads,
|
||||
const int n_past,
|
||||
const std::vector<gpt_vocab::id> & embd_inp,
|
||||
std::vector<float> & embd_w,
|
||||
size_t & mem_per_token) {
|
||||
const int N = embd_inp.size();
|
||||
|
||||
const auto & hparams = model.hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_head = hparams.n_head;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
const int n_rot = hparams.n_rot;
|
||||
|
||||
static size_t buf_size = 256u*1024*1024;
|
||||
static void * buf = malloc(buf_size);
|
||||
|
||||
if (mem_per_token > 0 && mem_per_token*N > buf_size) {
|
||||
const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
|
||||
//printf("\n{}: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
|
||||
|
||||
// reallocate
|
||||
buf_size = buf_size_new;
|
||||
buf = realloc(buf, buf_size);
|
||||
if (buf == nullptr) {
|
||||
spdlog::error("{}: failed to allocate {} bytes\n", __func__, buf_size);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
struct ggml_init_params params = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
|
||||
struct ggml_context * ctx0 = ggml_init(params);
|
||||
struct ggml_cgraph gf = {};
|
||||
|
||||
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
|
||||
|
||||
// wte
|
||||
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);
|
||||
|
||||
for (int il = 0; il < n_layer; ++il) {
|
||||
struct ggml_tensor * cur;
|
||||
|
||||
// norm
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpL);
|
||||
|
||||
// cur = ln_1_g*cur + ln_1_b
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
}
|
||||
|
||||
struct ggml_tensor * inpSA = cur;
|
||||
|
||||
// self-attention
|
||||
{
|
||||
struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
|
||||
struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
|
||||
|
||||
// store key and value to memory
|
||||
{
|
||||
struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur));
|
||||
|
||||
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd,
|
||||
( n_ctx)*ggml_element_size(model.memory_v),
|
||||
(il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v));
|
||||
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
|
||||
}
|
||||
|
||||
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * Q =
|
||||
ggml_permute(ctx0,
|
||||
Qcur,
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * K =
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K * Q
|
||||
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
|
||||
|
||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||
struct ggml_tensor * KQ_scaled =
|
||||
ggml_scale_inplace(ctx0,
|
||||
KQ,
|
||||
ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
|
||||
);
|
||||
|
||||
// KQ_masked = mask_past(KQ_scaled)
|
||||
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
|
||||
|
||||
// KQ = soft_max(KQ_masked)
|
||||
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
|
||||
|
||||
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
|
||||
struct ggml_tensor * V =
|
||||
ggml_view_3d(ctx0, model.memory_v,
|
||||
n_past + N, n_embd/n_head, n_head,
|
||||
n_ctx*ggml_element_size(model.memory_v),
|
||||
n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head,
|
||||
il*n_ctx*ggml_element_size(model.memory_v)*n_embd);
|
||||
|
||||
// KQV = transpose(V) * KQ_soft_max
|
||||
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
|
||||
|
||||
// KQV_merged = KQV.permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
|
||||
|
||||
// cur = KQV_merged.contiguous().view(n_embd, N)
|
||||
cur = ggml_cpy(ctx0,
|
||||
KQV_merged,
|
||||
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
|
||||
|
||||
// projection (no bias)
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_proj_w,
|
||||
cur);
|
||||
}
|
||||
|
||||
struct ggml_tensor * inpFF = cur;
|
||||
|
||||
// feed-forward network
|
||||
// this is independent of the self-attention result, so it could be done in parallel to the self-attention
|
||||
{
|
||||
// note here we pass inpSA instead of cur
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_fc_w,
|
||||
inpSA);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
|
||||
cur);
|
||||
|
||||
// GELU activation
|
||||
cur = ggml_gelu(ctx0, cur);
|
||||
|
||||
// projection
|
||||
// cur = proj_w*cur + proj_b
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// self-attention + FF
|
||||
cur = ggml_add(ctx0, cur, inpFF);
|
||||
|
||||
// input for next layer
|
||||
inpL = ggml_add(ctx0, cur, inpL);
|
||||
}
|
||||
|
||||
// norm
|
||||
{
|
||||
inpL = ggml_norm(ctx0, inpL);
|
||||
|
||||
// inpL = ln_f_g*inpL + ln_f_b
|
||||
inpL = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.ln_f_g, inpL),
|
||||
inpL),
|
||||
ggml_repeat(ctx0, model.ln_f_b, inpL));
|
||||
}
|
||||
|
||||
// lm_head
|
||||
{
|
||||
inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
|
||||
|
||||
inpL = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.lmh_b, inpL),
|
||||
inpL);
|
||||
}
|
||||
|
||||
// logits -> probs
|
||||
//inpL = ggml_soft_max_inplace(ctx0, inpL);
|
||||
|
||||
// run the computation
|
||||
ggml_build_forward_expand(&gf, inpL);
|
||||
ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
|
||||
|
||||
//if (n_past%100 == 0) {
|
||||
// ggml_graph_print (&gf);
|
||||
// ggml_graph_dump_dot(&gf, NULL, "gpt-j.dot");
|
||||
//}
|
||||
|
||||
//embd_w.resize(n_vocab*N);
|
||||
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
|
||||
|
||||
// return result for just the last token
|
||||
embd_w.resize(n_vocab);
|
||||
memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
|
||||
|
||||
if (mem_per_token == 0) {
|
||||
mem_per_token = ggml_used_mem(ctx0)/N;
|
||||
}
|
||||
//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
|
||||
|
||||
ggml_free(ctx0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
GPTJModel::~GPTJModel(){
|
||||
ggml_free(model->ctx);
|
||||
free(model);
|
||||
free(vocab);
|
||||
}
|
||||
|
||||
bool GPTJModel::load_model(std::string fname) {
|
||||
spdlog::info("{}: loading model from '{}' - please wait ...\n", __func__, fname.c_str());
|
||||
|
||||
auto fin = std::ifstream(fname, std::ios::binary);
|
||||
if (!fin) {
|
||||
spdlog::error("{}: failed to open '{}'\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// verify magic
|
||||
{
|
||||
uint32_t magic;
|
||||
fin.read((char *) &magic, sizeof(magic));
|
||||
if (magic != GGML_FILE_MAGIC) {
|
||||
spdlog::error("{}: invalid model file '{}' (bad magic)\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// load hparams
|
||||
{
|
||||
auto & hparams = model->hparams;
|
||||
|
||||
fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
|
||||
fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
|
||||
fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
|
||||
fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
|
||||
fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
|
||||
fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
|
||||
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
|
||||
|
||||
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
|
||||
|
||||
spdlog::info("{}: n_vocab = {}\n", __func__, hparams.n_vocab);
|
||||
spdlog::info("{}: n_ctx = {}\n", __func__, hparams.n_ctx);
|
||||
spdlog::info("{}: n_embd = {}\n", __func__, hparams.n_embd);
|
||||
spdlog::info("{}: n_head = {}\n", __func__, hparams.n_head);
|
||||
spdlog::info("{}: n_layer = {}\n", __func__, hparams.n_layer);
|
||||
spdlog::info("{}: n_rot = {}\n", __func__, hparams.n_rot);
|
||||
spdlog::info("{}: ftype = {}\n", __func__, hparams.ftype);
|
||||
spdlog::info("{}: qntvr = {}\n", __func__, qntvr);
|
||||
|
||||
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
|
||||
}
|
||||
|
||||
// load vocab
|
||||
{
|
||||
int32_t n_vocab = 0;
|
||||
fin.read((char *) &n_vocab, sizeof(n_vocab));
|
||||
|
||||
if (n_vocab != model->hparams.n_vocab) {
|
||||
spdlog::error("{}: invalid model file '{}' (bad vocab size {} != {})\n",
|
||||
__func__, fname.c_str(), n_vocab, model->hparams.n_vocab);
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string word;
|
||||
std::vector<char> buf(128);
|
||||
|
||||
for (int i = 0; i < n_vocab; i++) {
|
||||
uint32_t len;
|
||||
fin.read((char *) &len, sizeof(len));
|
||||
|
||||
buf.resize(len);
|
||||
fin.read((char *) buf.data(), len);
|
||||
word.assign(buf.data(), len);
|
||||
|
||||
vocab->token_to_id[word] = i;
|
||||
vocab->id_to_token[i] = word;
|
||||
}
|
||||
}
|
||||
|
||||
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
|
||||
// in order to save memory and also to speed up the computation
|
||||
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model->hparams.ftype));
|
||||
if (wtype == GGML_TYPE_COUNT) {
|
||||
spdlog::error("{}: invalid model file '{}' (bad ftype value {})\n",
|
||||
__func__, fname.c_str(), model->hparams.ftype);
|
||||
return false;
|
||||
}
|
||||
|
||||
auto & ctx = model->ctx;
|
||||
|
||||
size_t ctx_size = 0;
|
||||
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // wte
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // lmh_g
|
||||
ctx_size += n_vocab*ggml_type_sizef(GGML_TYPE_F32); // lmh_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_q_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_k_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_v_proj_w
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
|
||||
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F16); // memory_v
|
||||
|
||||
ctx_size += (5 + 10*n_layer)*512; // object overhead
|
||||
|
||||
spdlog::info("{}: ggml ctx size = {} MB\n", __func__, ctx_size/(1024.0*1024.0));
|
||||
}
|
||||
|
||||
|
||||
// create the ggml context
|
||||
{
|
||||
struct ggml_init_params params = {
|
||||
/*.mem_size =*/ ctx_size,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
|
||||
model->ctx = ggml_init(params);
|
||||
if (!model->ctx) {
|
||||
spdlog::error("{}: ggml_init() failed\n", __func__);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// prepare memory for the weights
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
model->layers.resize(n_layer);
|
||||
|
||||
model->wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
|
||||
model->ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model->ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
model->lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model->lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab);
|
||||
|
||||
// map by name
|
||||
model->tensors["transformer.wte.weight"] = model->wte;
|
||||
|
||||
model->tensors["transformer.ln_f.weight"] = model->ln_f_g;
|
||||
model->tensors["transformer.ln_f.bias"] = model->ln_f_b;
|
||||
|
||||
model->tensors["lm_head.weight"] = model->lmh_g;
|
||||
model->tensors["lm_head.bias"] = model->lmh_b;
|
||||
|
||||
for (int i = 0; i < n_layer; ++i) {
|
||||
auto & layer = model->layers[i];
|
||||
|
||||
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_attn_q_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_k_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_v_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
|
||||
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
|
||||
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
|
||||
|
||||
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
// map by name
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".ln_1.weight"] = layer.ln_1_g;
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".ln_1.bias"] = layer.ln_1_b;
|
||||
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".attn.q_proj.weight"] = layer.c_attn_q_proj_w;
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".attn.k_proj.weight"] = layer.c_attn_k_proj_w;
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".attn.v_proj.weight"] = layer.c_attn_v_proj_w;
|
||||
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".attn.out_proj.weight"] = layer.c_attn_proj_w;
|
||||
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.weight"] = layer.c_mlp_fc_w;
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".mlp.fc_in.bias"] = layer.c_mlp_fc_b;
|
||||
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.weight"] = layer.c_mlp_proj_w;
|
||||
model->tensors["transformer.h." + std::to_string(i) + ".mlp.fc_out.bias"] = layer.c_mlp_proj_b;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
// key + value memory
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
|
||||
const int n_mem = n_layer*n_ctx;
|
||||
const int n_elements = n_embd*n_mem;
|
||||
|
||||
model->memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
|
||||
model->memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
|
||||
|
||||
const size_t memory_size = ggml_nbytes(model->memory_k) + ggml_nbytes(model->memory_v);
|
||||
|
||||
spdlog::info("{}: memory_size = {} MB, n_mem = {}\n", __func__, memory_size/1024.0/1024.0, n_mem);
|
||||
}
|
||||
|
||||
// load weights
|
||||
{
|
||||
int n_tensors = 0;
|
||||
size_t total_size = 0;
|
||||
|
||||
spdlog::info("{}: ", __func__);
|
||||
|
||||
while (true) {
|
||||
int32_t n_dims;
|
||||
int32_t length;
|
||||
int32_t ttype;
|
||||
|
||||
fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
|
||||
fin.read(reinterpret_cast<char *>(&length), sizeof(length));
|
||||
fin.read(reinterpret_cast<char *>(&ttype), sizeof(ttype));
|
||||
|
||||
if (fin.eof()) {
|
||||
break;
|
||||
}
|
||||
|
||||
int32_t nelements = 1;
|
||||
int32_t ne[2] = { 1, 1 };
|
||||
for (int i = 0; i < n_dims; ++i) {
|
||||
fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
|
||||
nelements *= ne[i];
|
||||
}
|
||||
|
||||
std::string name(length, 0);
|
||||
fin.read(&name[0], length);
|
||||
|
||||
if (model->tensors.find(name.data()) == model->tensors.end()) {
|
||||
spdlog::error("{}: unknown tensor '{}' in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
|
||||
auto tensor = model->tensors[name.data()];
|
||||
if (ggml_nelements(tensor) != nelements) {
|
||||
spdlog::error("{}: tensor '{}' has wrong size in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
|
||||
if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
|
||||
spdlog::error("{}: tensor '{}' has wrong shape in model file: got [{}, {}], expected [{}, {}]\n",
|
||||
__func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]);
|
||||
return false;
|
||||
}
|
||||
|
||||
// for debugging
|
||||
if (0) {
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
|
||||
}
|
||||
|
||||
const size_t bpe = ggml_type_size(ggml_type(ttype));
|
||||
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
|
||||
spdlog::error("{}: tensor '{}' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
|
||||
return false;
|
||||
}
|
||||
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
|
||||
|
||||
//printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
|
||||
total_size += ggml_nbytes(tensor);
|
||||
if (++n_tensors % 8 == 0) {
|
||||
printf(".");
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
printf("\n");
|
||||
spdlog::info(" done\n");
|
||||
|
||||
spdlog::info("{}: model size = {:06.2f} MB / num tensors = {}\n", __func__, total_size/1024.0/1024.0, n_tensors);
|
||||
}
|
||||
|
||||
fin.close();
|
||||
|
||||
|
||||
|
||||
#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
|
||||
|
||||
if(config.n_gpu_layers > 0){
|
||||
size_t vram_total = 0;
|
||||
int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
|
||||
spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
|
||||
|
||||
for(int i=0; i < gpu_layers; i++) {
|
||||
const auto & layer = model->layers[i];
|
||||
layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_attn_k_proj_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_attn_v_proj_w->backend = GGML_BACKEND_GPU;
|
||||
|
||||
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
|
||||
|
||||
#if defined(GGML_USE_CLBLAST)
|
||||
ggml_cl_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
|
||||
ggml_cl_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
|
||||
ggml_cl_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
|
||||
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
|
||||
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
|
||||
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
|
||||
#else
|
||||
ggml_cuda_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
|
||||
ggml_cuda_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
|
||||
ggml_cuda_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
|
||||
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
|
||||
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
|
||||
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
|
||||
#endif
|
||||
}
|
||||
|
||||
spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
|
||||
}
|
||||
|
||||
#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
std::stringstream GPTJModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
|
||||
|
||||
std::stringstream result;
|
||||
// tokenize the prompt
|
||||
std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize((*vocab), prompt);
|
||||
|
||||
int n_past = 0;
|
||||
|
||||
int64_t t_sample_us = 0;
|
||||
int64_t t_predict_us = 0;
|
||||
|
||||
int n_predict = std::min(max_length, model->hparams.n_ctx - (int) embd_inp.size());
|
||||
|
||||
spdlog::debug("{}: number of tokens in prompt = {}", __func__, embd_inp.size());
|
||||
|
||||
std::vector<gpt_vocab::id> embd;
|
||||
|
||||
// determine the required inference memory per token:
|
||||
size_t mem_per_token = 0;
|
||||
|
||||
std::vector<float> logits;
|
||||
|
||||
gptj_eval((*model), config.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
|
||||
|
||||
for (int i = embd.size(); i < embd_inp.size() + n_predict; i++) {
|
||||
// predict
|
||||
if (embd.size() > 0) {
|
||||
const int64_t t_start_us = ggml_time_us();
|
||||
|
||||
if (!gptj_eval((*model), config.n_threads, n_past, embd, logits, mem_per_token)) {
|
||||
throw std::runtime_error("Failed to predict");
|
||||
}
|
||||
|
||||
t_predict_us += ggml_time_us() - t_start_us;
|
||||
}
|
||||
|
||||
n_past += embd.size();
|
||||
embd.clear();
|
||||
|
||||
if (i >= embd_inp.size()) {
|
||||
// sample next token
|
||||
const int top_k = config.top_k;
|
||||
const float top_p = config.top_p;
|
||||
const float temp = config.temp;
|
||||
|
||||
const int n_vocab = model->hparams.n_vocab;
|
||||
|
||||
gpt_vocab::id id = 0;
|
||||
|
||||
{
|
||||
const int64_t t_start_sample_us = ggml_time_us();
|
||||
|
||||
id = gpt_sample_top_k_top_p((*vocab), logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
|
||||
|
||||
t_sample_us += ggml_time_us() - t_start_sample_us;
|
||||
}
|
||||
|
||||
// add it to the context
|
||||
embd.push_back(id);
|
||||
|
||||
if(id != 50256){
|
||||
result << vocab->id_to_token[id].c_str();
|
||||
}
|
||||
|
||||
} else {
|
||||
// if here, it means we are still processing the input prompt
|
||||
for (int k = i; k < embd_inp.size(); k++) {
|
||||
embd.push_back(embd_inp[k]);
|
||||
|
||||
if(include_prompt){
|
||||
result << vocab->id_to_token[embd_inp[k]].c_str();
|
||||
}
|
||||
|
||||
if (embd.size() > config.n_batch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
i += embd.size() - 1;
|
||||
}
|
||||
|
||||
// end of text token
|
||||
if (embd.back() == 50256) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spdlog::debug("{}: sample time = {:8.2f} ms\n", __func__, t_sample_us/1000.0f);
|
||||
spdlog::debug("{}: predict time = {:8.2f} ms / {:.2f} ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
|
||||
|
||||
|
||||
return result;
|
||||
}
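End-to-end, GPTJModel is driven through the thread-safe predict() wrapper defined in common.cpp rather than by calling predict_impl() directly. The sketch below shows that flow under stated assumptions: the constructor arguments (a ModelConfig plus an RNG) mirror how main.cpp wires up the backends but are not part of this hunk, and the model path is purely illustrative.

#include <turbopilot/gptj.hpp>

#include <iostream>
#include <random>

int main() {
    ModelConfig config{};
    config.n_threads = 4;     // field names as set up in main.cpp
    config.temp = 0.2f;
    config.top_p = 0.1f;
    config.n_batch = 512;

    std::mt19937 rng(42);

    // Assumed constructor shape: config plus RNG (construction happens further down in main.cpp).
    GPTJModel model(config, rng);

    // Illustrative path only; any GGML-format GPT-J style model file would do here.
    if (!model.load_model("models/codegen-2B-ggml.bin")) {
        std::cerr << "failed to load model" << std::endl;
        return 1;
    }

    // predict() takes the internal mutex, runs predict_impl(), and returns the generated text.
    auto completion = model.predict("def fibonacci(n):", /*max_length=*/64, /*include_prompt=*/true);
    std::cout << completion.str() << std::endl;
    return 0;
}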
src/gptneox.cpp (new file, 776 lines)
@@ -0,0 +1,776 @@
|
||||
#include <turbopilot/gptneox.hpp>
|
||||
#include <spdlog/spdlog.h>
|
||||
|
||||
#include <ggml/ggml.h>
|
||||
|
||||
#ifdef GGML_USE_CLBLAST
|
||||
#include "ggml-opencl.h"
|
||||
#endif
|
||||
#ifdef GGML_USE_CUBLAS
|
||||
#include "ggml-cuda.h"
|
||||
#endif
|
||||
|
||||
#include <cinttypes>
|
||||
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||
#endif
|
||||
|
||||
|
||||
// feed-forward network
|
||||
ggml_tensor * gpt_neox_ff(
|
||||
const gpt_neox_layer &layer,
|
||||
ggml_context * ctx0,
|
||||
ggml_tensor * inp) {
|
||||
ggml_tensor * cur = ggml_norm(ctx0, inp);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, layer.ln_2_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, layer.ln_2_b, cur));
|
||||
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
layer.c_mlp_fc_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, layer.c_mlp_fc_b, cur),
|
||||
cur);
|
||||
|
||||
// GELU activation
|
||||
cur = ggml_gelu(ctx0, cur);
|
||||
|
||||
// projection
|
||||
// cur = proj_w*cur + proj_b
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
layer.c_mlp_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, layer.c_mlp_proj_b, cur),
|
||||
cur);
|
||||
return cur;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// evaluate the transformer
|
||||
//
|
||||
// - model: the model
|
||||
// - n_threads: number of threads to use
|
||||
// - n_past: the context size so far
|
||||
// - embd_inp: the embeddings of the tokens in the context
|
||||
// - embd_w: the predicted logits for the next token
|
||||
//
|
||||
bool gpt_neox_eval(
|
||||
const gpt_neox_model & model,
|
||||
const int n_threads,
|
||||
const int n_past,
|
||||
const std::vector<gpt_vocab::id> & embd_inp,
|
||||
std::vector<float> & embd_w,
|
||||
size_t & mem_per_token) {
|
||||
const int N = embd_inp.size();
|
||||
|
||||
const auto & hparams = model.hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_head = hparams.n_head;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
const int n_rot = hparams.n_rot;
|
||||
|
||||
static size_t buf_size = 512u*1024*1024;
|
||||
static void * buf = malloc(buf_size);
|
||||
|
||||
// use 2 scratch buffers
|
||||
// TODO: very hacky solution - reimplement in a more elegant way
|
||||
static size_t scr0_size = 512*1024*1024;
|
||||
static void * scr0 = malloc(scr0_size);
|
||||
|
||||
static size_t scr1_size = 512*1024*1024;
|
||||
static void * scr1 = malloc(scr1_size);
|
||||
|
||||
|
||||
if (mem_per_token > 0 && (mem_per_token*N) > buf_size) {
|
||||
const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
|
||||
spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);
|
||||
|
||||
|
||||
// reallocate
|
||||
buf_size = buf_size_new;
|
||||
buf = realloc(buf, buf_size);
|
||||
if (buf == nullptr) {
|
||||
fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
struct ggml_init_params params = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
|
||||
struct ggml_context * ctx0 = ggml_init(params);
|
||||
struct ggml_cgraph gf = {};
|
||||
|
||||
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
|
||||
|
||||
// wte
|
||||
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);
|
||||
|
||||
for (int il = 0; il < n_layer; ++il) {
|
||||
struct ggml_tensor * cur;
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
|
||||
|
||||
// self-attention
|
||||
{
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpL);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
}
|
||||
|
||||
// compute QKV
|
||||
{
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_attn_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 0*sizeof(float)*n_embd/n_head));
|
||||
struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 1*sizeof(float)*n_embd/n_head));
|
||||
struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
|
||||
|
||||
// using mode = 2 for GPT-NeoX mode
|
||||
Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2, 0);
|
||||
Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2, 0);
|
||||
|
||||
// store key and value to memory
|
||||
{
|
||||
Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd, N));
|
||||
|
||||
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd,
|
||||
( n_ctx)*ggml_element_size(model.memory_v),
|
||||
(il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v));
|
||||
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
|
||||
}
|
||||
|
||||
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * Q =
|
||||
ggml_permute(ctx0,
|
||||
Qcur,
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * K =
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K * Q
|
||||
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
|
||||
|
||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||
struct ggml_tensor * KQ_scaled =
|
||||
ggml_scale_inplace(ctx0,
|
||||
KQ,
|
||||
ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
|
||||
);
|
||||
|
||||
// KQ_masked = mask_past(KQ_scaled)
|
||||
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
|
||||
|
||||
// KQ = soft_max(KQ_masked)
|
||||
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
|
||||
|
||||
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
|
||||
struct ggml_tensor * V =
|
||||
ggml_view_3d(ctx0, model.memory_v,
|
||||
n_past + N, n_embd/n_head, n_head,
|
||||
n_ctx*ggml_element_size(model.memory_v),
|
||||
n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head,
|
||||
il*n_ctx*ggml_element_size(model.memory_v)*n_embd);
|
||||
|
||||
// KQV = transpose(V) * KQ_soft_max
|
||||
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
|
||||
|
||||
// KQV_merged = KQV.permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
|
||||
|
||||
// cur = KQV_merged.contiguous().view(n_embd, N)
|
||||
cur = ggml_cpy(ctx0,
|
||||
KQV_merged,
|
||||
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
|
||||
|
||||
// projection
|
||||
{
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur);
|
||||
}
|
||||
}
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
|
||||
|
||||
if (hparams.par_res == 0) {
|
||||
struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpL);
|
||||
|
||||
cur = gpt_neox_ff(model.layers[il], ctx0, inpFF);
|
||||
|
||||
// input for next layer
|
||||
inpL = ggml_add(ctx0, cur, inpFF);
|
||||
} else {
|
||||
struct ggml_tensor * inpFF = cur;
|
||||
|
||||
// this is independent of the self-attention result, so it could be done in parallel to the self-attention
|
||||
// note here we pass inpL instead of cur
|
||||
cur = gpt_neox_ff(model.layers[il], ctx0, inpL);
|
||||
|
||||
// layer input + FF
|
||||
cur = ggml_add(ctx0, cur, inpFF);
|
||||
|
||||
// input for next layer
|
||||
inpL = ggml_add(ctx0, cur, inpL);
|
||||
}
|
||||
}
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
|
||||
|
||||
// norm
|
||||
{
|
||||
inpL = ggml_norm(ctx0, inpL);
|
||||
|
||||
// inpL = ln_f_g*inpL + ln_f_b
|
||||
inpL = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.ln_f_g, inpL),
|
||||
inpL),
|
||||
ggml_repeat(ctx0, model.ln_f_b, inpL));
|
||||
}
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
|
||||
|
||||
// lm_head
|
||||
{
|
||||
inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
|
||||
|
||||
//inpL = ggml_add(ctx0,
|
||||
// ggml_repeat(ctx0, model.lmh_b, inpL),
|
||||
// inpL);
|
||||
}
|
||||
|
||||
// logits -> probs
|
||||
//inpL = ggml_soft_max_inplace(ctx0, inpL);
|
||||
|
||||
// run the computation
|
||||
ggml_build_forward_expand(&gf, inpL);
|
||||
ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
|
||||
|
||||
//if (n_past%100 == 0) {
|
||||
// ggml_graph_print (&gf);
|
||||
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
|
||||
//}
|
||||
|
||||
|
||||
//embd_w.resize(n_vocab*N);
|
||||
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
|
||||
|
||||
// return result for just the last token
|
||||
embd_w.resize(n_vocab);
|
||||
memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
|
||||
|
||||
|
||||
spdlog::debug("used_mem = {}\n", ggml_used_mem(ctx0));
|
||||
|
||||
if (mem_per_token == 0) {
|
||||
mem_per_token = ggml_used_mem(ctx0) / N; //* 4;
|
||||
spdlog::debug("Set mem_per_token={} / {} * {} = {}", ggml_used_mem(ctx0), N, 4, mem_per_token);
|
||||
|
||||
}
|
||||
|
||||
ggml_free(ctx0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
GPTNEOXModel::~GPTNEOXModel(){
|
||||
ggml_free(model->ctx);
|
||||
free(model);
|
||||
free(vocab);
|
||||
}
|
||||
|
||||
bool GPTNEOXModel::load_model(std::string fname) {
|
||||
printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
|
||||
|
||||
auto fin = std::ifstream(fname, std::ios::binary);
|
||||
if (!fin) {
|
||||
fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// verify magic
|
||||
{
|
||||
uint32_t magic;
|
||||
fin.read((char *) &magic, sizeof(magic));
|
||||
if (magic != GGML_FILE_MAGIC) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// load hparams
|
||||
{
|
||||
auto & hparams = model->hparams;
|
||||
|
||||
fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
|
||||
fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
|
||||
fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
|
||||
fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
|
||||
fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
|
||||
fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
|
||||
fin.read((char *) &hparams.par_res, sizeof(hparams.par_res));
|
||||
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
|
||||
|
||||
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
|
||||
|
||||
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
|
||||
printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
|
||||
printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
|
||||
printf("%s: n_head = %d\n", __func__, hparams.n_head);
|
||||
printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
|
||||
printf("%s: n_rot = %d\n", __func__, hparams.n_rot);
|
||||
printf("%s: par_res = %d\n", __func__, hparams.par_res);
|
||||
printf("%s: ftype = %d\n", __func__, hparams.ftype);
|
||||
printf("%s: qntvr = %d\n", __func__, qntvr);
|
||||
|
||||
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
|
||||
}
|
||||
|
||||
// load vocab
|
||||
{
|
||||
const int32_t n_vocab = model->hparams.n_vocab;
|
||||
|
||||
std::string word;
|
||||
std::vector<char> buf(128);
|
||||
|
||||
for (int i = 0; i < n_vocab; i++) {
|
||||
uint32_t len;
|
||||
fin.read((char *) &len, sizeof(len));
|
||||
|
||||
buf.resize(len);
|
||||
fin.read((char *) buf.data(), len);
|
||||
word.assign(buf.data(), len);
|
||||
|
||||
vocab->token_to_id[word] = i;
|
||||
vocab->id_to_token[i] = word;
|
||||
}
|
||||
}
|
||||
|
||||
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
|
||||
// in order to save memory and also to speed up the computation
|
||||
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model->hparams.ftype));
|
||||
if (wtype == GGML_TYPE_COUNT) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
|
||||
__func__, fname.c_str(), model->hparams.ftype);
|
||||
return false;
|
||||
}
|
||||
|
||||
auto & ctx = model->ctx;
|
||||
|
||||
size_t ctx_size = 0;
|
||||
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const size_t n_embd = hparams.n_embd;
|
||||
const size_t n_layer = hparams.n_layer;
|
||||
const size_t n_ctx = hparams.n_ctx;
|
||||
const size_t n_vocab = hparams.n_vocab;
|
||||
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // wte
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // lmh_g
|
||||
//ctx_size += n_vocab*ggml_type_sizef(GGML_TYPE_F32); // lmh_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
|
||||
|
||||
ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w
|
||||
ctx_size += n_layer*( 3*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
|
||||
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
|
||||
|
||||
ctx_size += (6 + 16*n_layer)*1024; // object overhead
|
||||
|
||||
printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
|
||||
}
|
||||
|
||||
// create the ggml context
|
||||
{
|
||||
struct ggml_init_params params = {
|
||||
/*.mem_size =*/ ctx_size,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
|
||||
model->ctx = ggml_init(params);
|
||||
if (!model->ctx) {
|
||||
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// prepare memory for the weights
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
model->layers.resize(n_layer);
|
||||
|
||||
model->wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
|
||||
model->ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model->ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
model->lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
//model->lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab);
|
||||
|
||||
// map by name
|
||||
model->tensors["gpt_neox.embed_in.weight"] = model->wte;
|
||||
|
||||
model->tensors["gpt_neox.final_layer_norm.weight"] = model->ln_f_g;
|
||||
model->tensors["gpt_neox.final_layer_norm.bias"] = model->ln_f_b;
|
||||
|
||||
model->tensors["embed_out.weight"] = model->lmh_g;
|
||||
//model->tensors["lm_head.bias"] = model->lmh_b;
|
||||
|
||||
for (int i = 0; i < n_layer; ++i) {
|
||||
auto & layer = model->layers[i];
|
||||
|
||||
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd);
|
||||
layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd);
|
||||
|
||||
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
|
||||
|
||||
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
// map by name
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".input_layernorm.weight"] = layer.ln_1_g;
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".input_layernorm.bias"] = layer.ln_1_b;
|
||||
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".attention.query_key_value.weight"] = layer.c_attn_attn_w;
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".attention.query_key_value.bias"] = layer.c_attn_attn_b;
|
||||
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".attention.dense.weight"] = layer.c_attn_proj_w;
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".attention.dense.bias"] = layer.c_attn_proj_b;
|
||||
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".post_attention_layernorm.weight"] = layer.ln_2_g;
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".post_attention_layernorm.bias"] = layer.ln_2_b;
|
||||
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".mlp.dense_h_to_4h.weight"] = layer.c_mlp_fc_w;
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".mlp.dense_h_to_4h.bias"] = layer.c_mlp_fc_b;
|
||||
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".mlp.dense_4h_to_h.weight"] = layer.c_mlp_proj_w;
|
||||
model->tensors["gpt_neox.layers." + std::to_string(i) + ".mlp.dense_4h_to_h.bias"] = layer.c_mlp_proj_b;
|
||||
}
|
||||
}
|
||||
|
||||
// key + value memory
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
|
||||
const int64_t n_mem = n_layer*n_ctx;
|
||||
const int64_t n_elements = n_embd*n_mem;
|
||||
|
||||
model->memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
|
||||
model->memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
|
||||
|
||||
const size_t memory_size = ggml_nbytes(model->memory_k) + ggml_nbytes(model->memory_v);
|
||||
|
||||
printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size/1024.0/1024.0, n_mem);
|
||||
}
|
||||
|
||||
// load weights
|
||||
{
|
||||
int n_tensors = 0;
|
||||
size_t total_size = 0;
|
||||
|
||||
printf("%s: ", __func__);
|
||||
|
||||
while (true) {
|
||||
int32_t n_dims;
|
||||
int32_t length;
|
||||
int32_t ttype;
|
||||
|
||||
fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
|
||||
fin.read(reinterpret_cast<char *>(&length), sizeof(length));
|
||||
fin.read(reinterpret_cast<char *>(&ttype), sizeof(ttype));
|
||||
|
||||
if (fin.eof()) {
|
||||
break;
|
||||
}
|
||||
|
||||
int32_t nelements = 1;
|
||||
int32_t ne[2] = { 1, 1 };
|
||||
for (int i = 0; i < n_dims; ++i) {
|
||||
fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
|
||||
nelements *= ne[i];
|
||||
}
|
||||
|
||||
std::string name(length, 0);
|
||||
fin.read(&name[0], length);
|
||||
|
||||
if (model->tensors.find(name.data()) == model->tensors.end()) {
|
||||
fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
|
||||
auto tensor = model->tensors[name.data()];
|
||||
if (ggml_nelements(tensor) != nelements) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
|
||||
if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%5d, %5d], expected [%5d, %5d]\n",
|
||||
__func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]);
|
||||
return false;
|
||||
}
|
||||
|
||||
// for debugging
|
||||
if (0) {
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
|
||||
}
|
||||
|
||||
const size_t bpe = ggml_type_size(ggml_type(ttype));
|
||||
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
|
||||
return false;
|
||||
}
|
||||
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
|
||||
|
||||
total_size += ggml_nbytes(tensor);
|
||||
if (++n_tensors % 8 == 0) {
|
||||
printf(".");
|
||||
fflush(stdout);
|
||||
}
|
||||
}
|
||||
|
||||
printf(" done\n");
|
||||
|
||||
printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
|
||||
}
|
||||
fin.close();
|
||||
|
||||
#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
|
||||
|
||||
|
||||
if(config.n_gpu_layers > 0){
|
||||
size_t vram_total = 0;
|
||||
int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
|
||||
spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
|
||||
|
||||
|
||||
for(int i=0; i < gpu_layers; i++) {
|
||||
const auto & layer = model->layers[i];
|
||||
layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
|
||||
|
||||
#if defined(GGML_USE_CLBLAST)
|
||||
ggml_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
|
||||
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
|
||||
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
|
||||
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
|
||||
#else
|
||||
ggml_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
|
||||
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
|
||||
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
|
||||
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
|
||||
#endif
|
||||
}
|
||||
|
||||
spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
|
||||
}
|
||||
|
||||
#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
|
||||
|
||||
std::stringstream result;
|
||||
// tokenize the prompt
|
||||
std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize((*vocab), prompt);
|
||||
|
||||
auto END_TOKEN_ID = vocab->token_to_id["<|endoftext|>"];
|
||||
|
||||
int n_past = 0;
|
||||
|
||||
int64_t t_sample_us = 0;
|
||||
int64_t t_predict_us = 0;
|
||||
|
||||
int n_predict = std::min(max_length, model->hparams.n_ctx - (int) embd_inp.size());
|
||||
|
||||
spdlog::debug("{}: number of tokens in prompt = {}", __func__, embd_inp.size());
|
||||
|
||||
std::vector<gpt_vocab::id> embd;
|
||||
|
||||
|
||||
|
||||
// determine the required inference memory per token:
|
||||
size_t mem_per_token = 0;
|
||||
|
||||
std::vector<float> logits;
|
||||
|
||||
std::vector<gpt_vocab::id> test = {};
|
||||
|
||||
for(int i=0;i<64;i++){
|
||||
test.push_back(i);
|
||||
}
|
||||
|
||||
gpt_neox_eval((*model), config.n_threads, 0, test, logits, mem_per_token);
|
||||
|
||||
const int64_t t_start_us = ggml_time_us();
|
||||
|
||||
int64_t t_prompt_us = 0;
|
||||
|
||||
int64_t t_response_us = 0;
|
||||
|
||||
for (int i = embd.size(); i < embd_inp.size() + n_predict; i++) {
|
||||
// predict
|
||||
if (embd.size() > 0) {
|
||||
const int64_t t_start_us = ggml_time_us();
|
||||
|
||||
if (!gpt_neox_eval((*model), config.n_threads, n_past, embd, logits, mem_per_token)) {
|
||||
throw std::runtime_error("Failed to predict");
|
||||
}
|
||||
|
||||
t_predict_us += ggml_time_us() - t_start_us;
|
||||
}
|
||||
|
||||
n_past += embd.size();
|
||||
embd.clear();
|
||||
|
||||
if (i >= embd_inp.size()) {
|
||||
// sample next token
|
||||
const int top_k = config.top_k;
|
||||
const float top_p = config.top_p;
|
||||
const float temp = config.temp;
|
||||
|
||||
const int n_vocab = model->hparams.n_vocab;
|
||||
|
||||
gpt_vocab::id id = 0;
|
||||
|
||||
{
|
||||
const int64_t t_start_sample_us = ggml_time_us();
|
||||
|
||||
id = gpt_sample_top_k_top_p((*vocab), logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
|
||||
|
||||
t_sample_us += ggml_time_us() - t_start_sample_us;
|
||||
}
|
||||
|
||||
// add it to the context
|
||||
embd.push_back(id);
|
||||
|
||||
// do not actually add endoftext char to the end string
|
||||
if(id != END_TOKEN_ID){
|
||||
result << vocab->id_to_token[id].c_str();
|
||||
}
|
||||
|
||||
} else {
|
||||
// if here, it means we are still processing the input prompt
|
||||
for (int k = i; k < embd_inp.size(); k++) {
|
||||
embd.push_back(embd_inp[k]);
|
||||
|
||||
if(include_prompt){
|
||||
result << vocab->id_to_token[embd_inp[k]].c_str();
|
||||
}
|
||||
|
||||
if (embd.size() > config.n_batch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
i += embd.size() - 1;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// end of text token
|
||||
//if (embd.back() == 50256) {
|
||||
if(embd.back() == END_TOKEN_ID){
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spdlog::debug("{}: sample time = {:8.2f} ms\n", __func__, t_sample_us/1000.0f);
|
||||
spdlog::debug("{}: predict time = {:8.2f} ms / {:.2f} ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
|
||||
|
||||
|
||||
|
||||
return result;
|
||||
}
|
||||
|
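The generation loop in predict_impl above ingests the prompt in chunks of up to n_batch tokens and then samples one token at a time with top-k/top-p until it produces <|endoftext|> or hits the length limit. A minimal Python sketch of the same control flow, assuming hypothetical eval_model() and sample_top_k_top_p() helpers in place of gpt_neox_eval() and gpt_sample_top_k_top_p():

# Minimal sketch of the batched prompt-ingestion + sampling loop in predict_impl.
# eval_model() and sample_top_k_top_p() are hypothetical stand-ins, not the real ggml API.
def generate(prompt_ids, n_predict, n_batch, end_token_id, eval_model, sample_top_k_top_p):
    generated = []
    pending = []      # tokens queued for the next model evaluation
    n_past = 0        # number of context tokens already evaluated
    logits = None
    i = 0
    while i < len(prompt_ids) + n_predict:
        if pending:
            logits = eval_model(pending, n_past)   # feed queued tokens through the model
            n_past += len(pending)
            pending = []
        if i >= len(prompt_ids):
            # prompt fully consumed: sample the next token from the latest logits
            next_id = sample_top_k_top_p(logits)
            pending.append(next_id)
            if next_id == end_token_id:            # stop without emitting <|endoftext|>
                break
            generated.append(next_id)
        else:
            # still consuming the prompt: queue up to n_batch prompt tokens at once
            pending = list(prompt_ids[i:i + n_batch])
            i += len(pending) - 1
        i += 1
    return generated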
src/main.cpp (new file, 196 lines)
@ -0,0 +1,196 @@
|
||||
#include <iostream>
|
||||
#include <cstdio>
|
||||
#include <ggml/ggml.h>
|
||||
|
||||
#include <spdlog/spdlog.h>
|
||||
|
||||
#include <turbopilot/crow_all.h>
|
||||
|
||||
#include <argparse/argparse.hpp>
|
||||
|
||||
#include "turbopilot/model.hpp"
|
||||
#include "turbopilot/starcoder.hpp"
|
||||
#include "turbopilot/gptj.hpp"
|
||||
#include "turbopilot/gptneox.hpp"
|
||||
#include "turbopilot/server.hpp"
|
||||
|
||||
|
||||
#define TURBOPILOT_VERSION "1.1.0"
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
|
||||
argparse::ArgumentParser program("turbopilot", TURBOPILOT_VERSION);
|
||||
|
||||
program.add_argument("--debug")
|
||||
.default_value(false)
|
||||
.help("Output verbose logs and timings")
|
||||
.implicit_value(true);
|
||||
|
||||
program.add_argument("-f", "--model-file")
|
||||
.help("Path to the model that turbopilot should serve")
|
||||
.required();
|
||||
|
||||
program.add_argument("-m", "--model-type")
|
||||
.help("The type of model to load. Can be codegen,starcoder,wizardcoder,stablecode")
|
||||
.default_value("codegen");
|
||||
|
||||
program.add_argument("-t", "--threads")
|
||||
.help("The number of CPU threads turbopilot is allowed to use. Defaults to 4")
|
||||
.default_value(4)
|
||||
.scan<'i', int>();
|
||||
|
||||
|
||||
program.add_argument("--ngl", "--n-gpu-layers")
|
||||
.help("The number of layers to offload to GPU")
|
||||
.default_value(0)
|
||||
.scan<'i', int>();
|
||||
|
||||
|
||||
program.add_argument("-p", "--port")
|
||||
.help("The tcp port that turbopilot should listen on")
|
||||
.default_value(18080)
|
||||
.scan<'i', int>();
|
||||
|
||||
program.add_argument("-r", "--random-seed")
|
||||
.help("Set the random seed for RNG functions")
|
||||
.default_value(-1)
|
||||
.scan<'i', int>();
|
||||
|
||||
program.add_argument("--temperature")
|
||||
.help("Set the generation temperature")
|
||||
.default_value(0.2f)
|
||||
.scan<'g', float>();
|
||||
|
||||
program.add_argument("--top-p")
|
||||
.help("Set the generation top_p")
|
||||
.default_value(0.1f)
|
||||
.scan<'g', float>();
|
||||
|
||||
program.add_argument("-b", "--batch-size")
|
||||
.help("set batch size for model completion")
|
||||
.default_value(512)
|
||||
.scan<'i',int>();
|
||||
|
||||
|
||||
program.add_argument("prompt").remaining();
|
||||
|
||||
|
||||
|
||||
try
|
||||
{
|
||||
program.parse_args(argc, argv);
|
||||
}
|
||||
catch (const std::runtime_error &err)
|
||||
{
|
||||
std::cerr << err.what() << std::endl;
|
||||
std::cerr << program;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if(program.get<bool>("--debug")){
|
||||
spdlog::set_level(spdlog::level::level_enum::debug);
|
||||
spdlog::debug("debug logging enabled");
|
||||
}
|
||||
|
||||
ggml_time_init();
|
||||
|
||||
const int64_t t_main_start_us = ggml_time_us();
|
||||
|
||||
|
||||
TurbopilotModel *model = NULL;
|
||||
|
||||
auto model_type = program.get<std::string>("--model-type");
|
||||
|
||||
ModelConfig config{};
|
||||
std::mt19937 rng(program.get<int>("--random-seed"));
|
||||
|
||||
config.n_threads = program.get<int>("--threads");
|
||||
config.temp = program.get<float>("--temperature");
|
||||
config.top_p = program.get<float>("--top-p");
|
||||
config.n_batch = program.get<int>("--batch-size");
|
||||
config.n_gpu_layers = program.get<int>("--ngl");
|
||||
|
||||
if(model_type.compare("codegen") == 0) {
|
||||
spdlog::info("Initializing GPT-J type model for '{}' model", model_type);
|
||||
model = new GPTJModel(config, rng);
|
||||
}else if(model_type.compare("starcoder") == 0 || model_type.compare("wizardcoder") == 0){
|
||||
spdlog::info("Initializing Starcoder/Wizardcoder type model for '{}' model type", model_type);
|
||||
model = new StarcoderModel(config, rng);
|
||||
}else if(model_type.compare("stablecode") == 0){
|
||||
spdlog::info("Initializing StableLM type model for '{}' model type", model_type);
|
||||
model = new GPTNEOXModel(config, rng);
|
||||
}else{
spdlog::error("Invalid model type: {}", model_type);
return 1;
}
|
||||
|
||||
spdlog::info("Attempt to load model from {}", program.get<std::string>("--model-type"));
|
||||
int64_t t_load_us = 0;
|
||||
const int64_t t_start_us = ggml_time_us();
|
||||
auto loaded = model->load_model(program.get<std::string>("--model-file"));
|
||||
|
||||
if(!loaded){
|
||||
spdlog::error("Failed to load model");
|
||||
return -1;
|
||||
}
|
||||
|
||||
t_load_us = ggml_time_us() - t_start_us;
|
||||
|
||||
|
||||
spdlog::info("Loaded model in {:0.2f}ms", t_load_us/1000.0f);
|
||||
|
||||
|
||||
crow::SimpleApp app;
|
||||
|
||||
TBPLogger logger;
|
||||
|
||||
crow::logger::setHandler(&logger);
|
||||
|
||||
CROW_ROUTE(app, "/")([](){
|
||||
return "Hello world";
|
||||
});
|
||||
|
||||
|
||||
CROW_ROUTE(app, "/copilot_internal/v2/token")([](){
|
||||
//return "Hello world";
|
||||
|
||||
crow::json::wvalue response = {{"token","1"}, {"expires_at", static_cast<std::uint64_t>(2600000000)}, {"refresh_in",900}};
|
||||
|
||||
crow::response res;
|
||||
res.code = 200;
|
||||
res.set_header("Content-Type", "application/json");
|
||||
res.body = response.dump();
|
||||
return res;
|
||||
});
|
||||
|
||||
//huggingface code compatible endpoint
|
||||
CROW_ROUTE(app, "/api/generate").methods(crow::HTTPMethod::Post)
|
||||
([&model](const crow::request& req) {
|
||||
return handle_hf_request(model, req);
|
||||
});
|
||||
|
||||
CROW_ROUTE(app, "/v1/completions").methods(crow::HTTPMethod::Post)
|
||||
([&model](const crow::request& req) {
|
||||
return handle_openai_request(model, req);
|
||||
});
|
||||
|
||||
CROW_ROUTE(app, "/v1/engines/codegen/completions").methods(crow::HTTPMethod::Post)
|
||||
([&model](const crow::request& req) {
|
||||
return handle_openai_request(model, req);
|
||||
});
|
||||
|
||||
|
||||
CROW_ROUTE(app, "/v1/engines/copilot-codex/completions").methods(crow::HTTPMethod::Post)
|
||||
([&model](const crow::request& req) {
|
||||
return handle_openai_request(model, req);
|
||||
});
|
||||
|
||||
|
||||
app.port(program.get<int>("--port")).multithreaded().run();
|
||||
|
||||
|
||||
|
||||
delete model;
|
||||
}
|
||||
|
||||
|
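For reference, the flags declared above translate into an invocation along these lines. This is a hedged example: the binary location assumes the default CMake output directory, and the model path is a placeholder rather than anything taken from this diff.

# Hypothetical launcher mirroring the argparse flags defined in main.cpp above.
# The binary location and model path are assumptions, not taken from this diff.
import subprocess

subprocess.run([
    "./build/bin/turbopilot",
    "--model-type", "stablecode",         # codegen | starcoder | wizardcoder | stablecode
    "--model-file", "models/model.bin",   # placeholder path to a GGML model file
    "--threads", "8",
    "--port", "18080",                    # default port used by the server
    "--temperature", "0.2",
    "--top-p", "0.1",
    "--batch-size", "512",
    "--ngl", "0",                         # layers to offload to GPU; 0 keeps everything on CPU
])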
src/server.cpp (new file, 110 lines)
@ -0,0 +1,110 @@
|
||||
|
||||
#include "turbopilot/server.hpp"
|
||||
#include "turbopilot/model.hpp"
|
||||
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <boost/uuid/uuid.hpp> // uuid class
|
||||
#include <boost/uuid/uuid_generators.hpp> // generators
|
||||
#include <boost/uuid/uuid_io.hpp> // streaming operators etc.
|
||||
|
||||
|
||||
|
||||
/**
* This function serves Hugging Face-compatible autocompletion requests from crow
*
*/
|
||||
crow::response handle_hf_request(TurbopilotModel *model, const crow::request& req){
|
||||
|
||||
crow::json::rvalue data = crow::json::load(req.body);
|
||||
|
||||
if(!data.has("inputs")){
|
||||
crow::response res;
|
||||
res.code = 400;
|
||||
res.set_header("Content-Type", "application/json");
|
||||
res.body = "{\"message\":\"you must specify inputs field or\"}";
|
||||
return res;
|
||||
}
|
||||
|
||||
// std::string suffix = data["suffix"].s();
|
||||
int maxTokens = 200;
|
||||
if(data.has("max_tokens")){
|
||||
maxTokens = data["max_tokens"].i();
|
||||
}
|
||||
|
||||
|
||||
auto result = model->predict(data["inputs"].s(), maxTokens, false);
|
||||
|
||||
crow::json::wvalue response = {
|
||||
{"generated_text", result.str()},
|
||||
};
|
||||
|
||||
|
||||
crow::response res;
|
||||
res.code = 200;
|
||||
res.set_header("Content-Type", "application/json");
|
||||
res.body = response.dump(); //ss.str();
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
* This function serves OpenAI-compatible completion requests from crow
*
*/
|
||||
crow::response handle_openai_request(TurbopilotModel *model, const crow::request& req){
|
||||
|
||||
crow::json::rvalue data = crow::json::load(req.body);
|
||||
|
||||
if(!data.has("prompt") && !data.has("input_ids")){
|
||||
crow::response res;
|
||||
res.code = 400;
|
||||
res.set_header("Content-Type", "application/json");
|
||||
res.body = "{\"message\":\"you must specify a prompt or input_ids\"}";
|
||||
return res;
|
||||
}
|
||||
|
||||
// std::string suffix = data["suffix"].s();
|
||||
int maxTokens = 200;
|
||||
if(data.has("max_tokens")){
|
||||
maxTokens = data["max_tokens"].i();
|
||||
}
|
||||
|
||||
|
||||
auto result = model->predict(data["prompt"].s(), maxTokens, false);
|
||||
|
||||
|
||||
boost::uuids::uuid uuid = boost::uuids::random_generator()();
|
||||
|
||||
|
||||
// Generate a mock response based on the input parameters
|
||||
crow::json::wvalue choice = {
|
||||
{"text", result.str()},
|
||||
{"index",0},
|
||||
{"finish_reason", "length"},
|
||||
{"logprobs", nullptr}
|
||||
};
|
||||
crow::json::wvalue::list choices = {choice};
|
||||
|
||||
|
||||
crow::json::wvalue usage = {
|
||||
{"completion_tokens", 0},
|
||||
// {"prompt_tokens", static_cast<std::uint64_t>(embd_inp.size())},
|
||||
{"prompt_tokens", 0},
|
||||
{"total_tokens", 0}
|
||||
};
|
||||
|
||||
crow::json::wvalue response = {
|
||||
{"id", boost::lexical_cast<std::string>(uuid)},
|
||||
{"model", "codegen"},
|
||||
{"object","text_completion"},
|
||||
{"created", static_cast<std::int64_t>(std::time(nullptr))},
|
||||
{"choices", choices },
|
||||
{"usage", usage}
|
||||
};
|
||||
|
||||
crow::response res;
|
||||
res.code = 200;
|
||||
res.set_header("Content-Type", "application/json");
|
||||
|
||||
res.body = response.dump(); //ss.str();
|
||||
return res;
|
||||
}
|
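The two handlers above expect JSON bodies with "inputs" (Hugging Face style) or "prompt" (OpenAI style) plus an optional "max_tokens", and reply with "generated_text" or a "choices" array respectively. A hedged client-side sketch against a server already running on the default port:

# Hedged client examples for the routes implemented in server.cpp / main.cpp above.
# Assumes a turbopilot server is already listening on localhost:18080.
import requests

BASE = "http://localhost:18080"

# Copilot-style token endpoint: returns a static token payload.
print(requests.get(f"{BASE}/copilot_internal/v2/token").json())

# Hugging Face-compatible endpoint: expects "inputs", returns "generated_text".
hf = requests.post(f"{BASE}/api/generate",
                   json={"inputs": "def fibonacci(n):", "max_tokens": 64})
print(hf.json()["generated_text"])

# OpenAI-compatible endpoint: expects "prompt", returns a "choices" list.
oa = requests.post(f"{BASE}/v1/completions",
                   json={"prompt": "def fibonacci(n):", "max_tokens": 64})
print(oa.json()["choices"][0]["text"])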
src/starcoder.cpp (new file, 834 lines)
@ -0,0 +1,834 @@
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
#include <turbopilot/starcoder.hpp>
|
||||
#include <ggml/ggml.h>
|
||||
#include <spdlog/spdlog.h>
|
||||
|
||||
#ifdef GGML_USE_CLBLAST
|
||||
#include "ggml-opencl.h"
|
||||
#endif
|
||||
#ifdef GGML_USE_CUBLAS
|
||||
#include "ggml-cuda.h"
|
||||
#endif
|
||||
|
||||
// evaluate the transformer
|
||||
//
|
||||
// - model: the model
|
||||
// - n_threads: number of threads to use
|
||||
// - n_past: the context size so far
|
||||
// - embd_inp: the embeddings of the tokens in the context
|
||||
// - embd_w: the predicted logits for the next token
|
||||
//
|
||||
bool starcoder_eval(
|
||||
const starcoder_model & model,
|
||||
const int n_threads,
|
||||
const int n_past,
|
||||
const std::vector<gpt_vocab::id> & embd_inp,
|
||||
std::vector<float> & embd_w,
|
||||
size_t & mem_per_token) {
|
||||
const int N = embd_inp.size();
|
||||
|
||||
const auto & hparams = model.hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_head = hparams.n_head;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
static size_t buf_size = 512u*1024*1024;
|
||||
static void * buf = malloc(buf_size);
|
||||
|
||||
// use 2 scratch buffers
|
||||
// TODO: very hacky solution - reimplement in a more elegant way
|
||||
static size_t scr0_size = 512u*1024*1024;
|
||||
static void * scr0 = malloc(scr0_size);
|
||||
|
||||
static size_t scr1_size = 512u*1024*1024;
|
||||
static void * scr1 = malloc(scr1_size);
|
||||
|
||||
if (mem_per_token > 0 && 2*mem_per_token*N > buf_size) {
|
||||
const size_t buf_size_new = 2*(mem_per_token*N); // double the estimate to leave headroom for ggml object overhead
|
||||
|
||||
if(buf_size_new > buf_size){
|
||||
spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);
|
||||
|
||||
// reallocate
|
||||
buf_size = buf_size_new;
|
||||
buf = realloc(buf, buf_size);
|
||||
if (buf == nullptr) {
|
||||
spdlog::error("{}: failed to allocate {} bytes\n", __func__, buf_size);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
struct ggml_init_params params = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
|
||||
|
||||
struct ggml_context * ctx0 = ggml_init(params);
|
||||
struct ggml_cgraph gf = {};
|
||||
|
||||
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
|
||||
|
||||
struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
for (int i = 0; i < N; ++i) {
|
||||
((int32_t *) position->data)[i] = n_past + i;
|
||||
}
|
||||
|
||||
// wte + wpe
|
||||
struct ggml_tensor * inpL =
|
||||
ggml_add(ctx0,
|
||||
ggml_get_rows(ctx0, model.wte, embd),
|
||||
ggml_get_rows(ctx0, model.wpe, position));
|
||||
|
||||
for (int il = 0; il < n_layer; ++il) {
|
||||
struct ggml_tensor * cur;
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
|
||||
|
||||
// norm
|
||||
{
|
||||
// [ 768, N]
|
||||
cur = ggml_norm(ctx0, inpL);
|
||||
|
||||
// cur = ln_1_g*cur + ln_1_b
|
||||
// [ 768, N]
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
}
|
||||
|
||||
// attn
|
||||
// [2304, 768] - model.layers[il].c_attn_attn_w
|
||||
// [2304, 1] - model.layers[il].c_attn_attn_b
|
||||
// [ 768, N] - cur (in)
|
||||
// [2304, N] - cur (out)
|
||||
//
|
||||
// cur = attn_w*cur + attn_b
|
||||
// [2304, N]
|
||||
{
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_attn_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// self-attention
|
||||
{
|
||||
struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd);
|
||||
struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd);
|
||||
struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd);
|
||||
|
||||
// store key and value to memory
|
||||
if (N >= 1) {
|
||||
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));
|
||||
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
|
||||
}
|
||||
|
||||
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
|
||||
// [64, N, 12]
|
||||
struct ggml_tensor * Q =
|
||||
ggml_permute(ctx0,
|
||||
ggml_cpy(ctx0,
|
||||
Qcur,
|
||||
ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)),
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
|
||||
// [64, n_past + N, 12]
|
||||
struct ggml_tensor * K =
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
0, 2, 1, 3); //TODO: need to be tiled
|
||||
|
||||
// GG: flash attention
|
||||
//struct ggml_tensor * V =
|
||||
// ggml_cpy(ctx0,
|
||||
// ggml_permute(ctx0,
|
||||
// ggml_reshape_3d(ctx0,
|
||||
// ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
|
||||
// n_embd/n_head, n_head, n_past + N),
|
||||
// 1, 2, 0, 3),
|
||||
// ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));
|
||||
|
||||
//struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true);
|
||||
|
||||
// K * Q
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); //TODO: check if it broadcasts
|
||||
|
||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ_scaled =
|
||||
ggml_scale_inplace(ctx0,
|
||||
KQ,
|
||||
ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
|
||||
);
|
||||
|
||||
// KQ_masked = mask_past(KQ_scaled)
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
|
||||
|
||||
// KQ = soft_max(KQ_masked)
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
|
||||
|
||||
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
|
||||
// [n_past + N, 64, 12]
|
||||
struct ggml_tensor * V_trans =
|
||||
ggml_cpy(ctx0,
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
1, 2, 0, 3),
|
||||
ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head));
|
||||
|
||||
// KQV = transpose(V) * KQ_soft_max
|
||||
// [64, N, 12]
|
||||
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
|
||||
|
||||
// KQV_merged = KQV.permute(0, 2, 1, 3)
|
||||
// [64, 12, N]
|
||||
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
|
||||
|
||||
// cur = KQV_merged.contiguous().view(n_embd, N)
|
||||
// [768, N]
|
||||
cur = ggml_cpy(ctx0,
|
||||
KQV_merged,
|
||||
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
|
||||
}
|
||||
|
||||
// projection
|
||||
// [ 768, 768] - model.layers[il].c_attn_proj_w
|
||||
// [ 768, 1] - model.layers[il].c_attn_proj_b
|
||||
// [ 768, N] - cur (in)
|
||||
// [ 768, N] - cur (out)
|
||||
//
|
||||
// cur = proj_w*cur + proj_b
|
||||
// [768, N]
|
||||
{
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// add the input
|
||||
cur = ggml_add(ctx0, cur, inpL);
|
||||
|
||||
struct ggml_tensor * inpFF = cur;
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
|
||||
|
||||
// feed-forward network
|
||||
{
|
||||
// norm
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpFF);
|
||||
|
||||
// cur = ln_2_g*cur + ln_2_b
|
||||
// [ 768, N]
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_2_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_2_b, cur));
|
||||
}
|
||||
|
||||
// fully connected
|
||||
// [3072, 768] - model.layers[il].c_mlp_fc_w
|
||||
// [3072, 1] - model.layers[il].c_mlp_fc_b
|
||||
// [ 768, N] - cur (in)
|
||||
// [3072, N] - cur (out)
|
||||
//
|
||||
// cur = fc_w*cur + fc_b
|
||||
// [3072, N]
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_fc_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
|
||||
cur);
|
||||
|
||||
// GELU activation
|
||||
// [3072, N]
|
||||
cur = ggml_gelu(ctx0, cur);
|
||||
|
||||
// projection
|
||||
// [ 768, 3072] - model.layers[il].c_mlp_proj_w
|
||||
// [ 768, 1] - model.layers[il].c_mlp_proj_b
|
||||
// [3072, N] - cur (in)
|
||||
// [ 768, N] - cur (out)
|
||||
//
|
||||
// cur = proj_w*cur + proj_b
|
||||
// [768, N]
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// input for next layer
|
||||
inpL = ggml_add(ctx0, cur, inpFF);
|
||||
}
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
|
||||
|
||||
// norm
|
||||
{
|
||||
// [ 768, N]
|
||||
inpL = ggml_norm(ctx0, inpL);
|
||||
|
||||
// inpL = ln_f_g*inpL + ln_f_b
|
||||
// [ 768, N]
|
||||
inpL = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.ln_f_g, inpL),
|
||||
inpL),
|
||||
ggml_repeat(ctx0, model.ln_f_b, inpL));
|
||||
}
|
||||
|
||||
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
|
||||
|
||||
// inpL = WTE * inpL
|
||||
// [ 768, 50257] - model.lm_head
|
||||
// [ 768, N] - inpL
|
||||
inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
|
||||
|
||||
// logits -> probs
|
||||
//inpL = ggml_soft_max_inplace(ctx0, inpL);
|
||||
|
||||
// run the computation
|
||||
ggml_build_forward_expand(&gf, inpL);
|
||||
ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
|
||||
|
||||
//if (n_past%100 == 0) {
|
||||
// ggml_graph_print (&gf);
|
||||
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
|
||||
//}
|
||||
|
||||
//embd_w.resize(n_vocab*N);
|
||||
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
|
||||
|
||||
// return result just for the last token
|
||||
embd_w.resize(n_vocab);
|
||||
memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
|
||||
|
||||
if (mem_per_token == 0) {
|
||||
mem_per_token = ggml_used_mem(ctx0)/N;
|
||||
}
|
||||
|
||||
spdlog::debug("{}: used mem buf={} bytes", __func__, ggml_used_mem(ctx0));
|
||||
|
||||
|
||||
ggml_free(ctx0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
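The self-attention block inside starcoder_eval follows the usual scaled dot-product pattern: scores = K·Q / sqrt(d_head), a causal mask over positions later than each query, softmax, then a weighted sum of V. A hedged single-head NumPy sketch of that step, ignoring the multi-head reshape/permute bookkeeping:

# Hedged NumPy sketch of the masked scaled dot-product attention computed above.
# Single head only; d_head corresponds to n_embd / n_head in starcoder_eval.
import numpy as np

def causal_attention(Q, K, V, n_past):
    # Q: (n_new, d_head); K, V: (n_past + n_new, d_head)
    d_head = Q.shape[-1]
    n_new, n_ctx = Q.shape[0], K.shape[0]
    scores = (Q @ K.T) / np.sqrt(d_head)                     # KQ_scaled
    # each query at absolute position n_past + i may attend to keys 0..n_past + i
    mask = np.triu(np.ones((n_new, n_ctx), dtype=bool), k=n_past + 1)
    scores = np.where(mask, -np.inf, scores)                 # KQ_masked
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)           # KQ_soft_max
    return weights @ V                                       # KQV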
StarcoderModel::~StarcoderModel(){
|
||||
ggml_free(model->ctx);
|
||||
free(model);
|
||||
free(vocab);
|
||||
}
|
||||
|
||||
bool StarcoderModel::load_model(std::string fname) {
|
||||
printf("%s: loading model from '%s'\n", __func__, fname.c_str());
|
||||
|
||||
auto fin = std::ifstream(fname, std::ios::binary);
|
||||
if (!fin) {
|
||||
fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// verify magic
|
||||
{
|
||||
uint32_t magic;
|
||||
fin.read((char *) &magic, sizeof(magic));
|
||||
if (magic != GGML_FILE_MAGIC) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// load hparams
|
||||
{
|
||||
auto & hparams = model->hparams;
|
||||
|
||||
fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
|
||||
fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
|
||||
fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
|
||||
fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
|
||||
fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
|
||||
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
|
||||
|
||||
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
|
||||
|
||||
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
|
||||
printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
|
||||
printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
|
||||
printf("%s: n_head = %d\n", __func__, hparams.n_head);
|
||||
printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
|
||||
printf("%s: ftype = %d\n", __func__, hparams.ftype);
|
||||
printf("%s: qntvr = %d\n", __func__, qntvr);
|
||||
|
||||
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
|
||||
}
|
||||
|
||||
// load vocab
|
||||
{
|
||||
int32_t n_vocab = 0;
|
||||
fin.read((char *) &n_vocab, sizeof(n_vocab));
|
||||
|
||||
if (n_vocab != model->hparams.n_vocab) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
|
||||
__func__, fname.c_str(), n_vocab, model->hparams.n_vocab);
|
||||
return false;
|
||||
}
|
||||
|
||||
std::string word;
|
||||
std::vector<char> buf(128);
|
||||
|
||||
for (int i = 0; i < n_vocab; i++) {
|
||||
uint32_t len;
|
||||
fin.read((char *) &len, sizeof(len));
|
||||
|
||||
buf.resize(len);
|
||||
fin.read((char *) buf.data(), len);
|
||||
word.assign(buf.data(), len);
|
||||
|
||||
vocab->token_to_id[word] = i;
|
||||
vocab->id_to_token[i] = word;
|
||||
|
||||
// if (i < 10) fprintf(stderr, "%.s: vocab[%d] = '%s'\n", __func__, i, word.c_str());
|
||||
}
|
||||
|
||||
// Add StarChat special tokens.
|
||||
for (const std::string & token : {
|
||||
"<|system|>",
|
||||
"<|user|>",
|
||||
"<|assistant|>",
|
||||
"<|end|>",
|
||||
"<fim-prefix>",
|
||||
"<fim-middle>",
|
||||
"<fim-suffix>",
|
||||
"<fim-pad>",
|
||||
"<|end_of_turn|>"
|
||||
}) {
|
||||
if (vocab->token_to_id.find(token) != vocab->token_to_id.end()) {
|
||||
vocab->add_special_token(token);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
|
||||
// in order to save memory and also to speed up the computation
|
||||
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model->hparams.ftype));
|
||||
if (wtype == GGML_TYPE_COUNT) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
|
||||
__func__, fname.c_str(), model->hparams.ftype);
|
||||
return false;
|
||||
}
|
||||
|
||||
auto & ctx = model->ctx;
|
||||
|
||||
size_t ctx_size = 0;
|
||||
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
const int head_dim = n_embd / hparams.n_head;
|
||||
const int kv_heads = hparams.n_head; // 1 if MQA else hparams.n_head
|
||||
const int kv_dim = kv_heads * head_dim;
|
||||
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
|
||||
|
||||
ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // wte
|
||||
ctx_size += n_ctx*n_embd*ggml_type_sizef(GGML_TYPE_F32); // wpe
|
||||
ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // lm_head
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b
|
||||
|
||||
ctx_size += n_layer*((n_embd + 2*kv_dim)*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w // TODO:
|
||||
ctx_size += n_layer*( (n_embd + 2*kv_dim)*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
|
||||
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
|
||||
|
||||
ctx_size += (6 + 12*n_layer)*512; // object overhead
|
||||
|
||||
printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
|
||||
}
|
||||
|
||||
// create the ggml context
|
||||
{
|
||||
struct ggml_init_params params = {
|
||||
/*.mem_size =*/ ctx_size,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ false,
|
||||
};
|
||||
|
||||
model->ctx = ggml_init(params);
|
||||
if (!model->ctx) {
|
||||
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// prepare memory for the weights
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
const int head_dim = n_embd / hparams.n_head;
|
||||
const int kv_heads = hparams.n_head; // 1 if MQA else hparams.n_head
|
||||
const int kv_dim = kv_heads * head_dim;
|
||||
|
||||
model->layers.resize(n_layer);
|
||||
|
||||
model->ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model->ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
model->wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model->wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx);
|
||||
model->lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
|
||||
// map by name
|
||||
model->tensors["model/ln_f/g"] = model->ln_f_g;
|
||||
model->tensors["model/ln_f/b"] = model->ln_f_b;
|
||||
|
||||
model->tensors["model/wte"] = model->wte;
|
||||
model->tensors["model/wpe"] = model->wpe;
|
||||
model->tensors["model/lm_head"] = model->lm_head;
|
||||
|
||||
for (int i = 0; i < n_layer; ++i) {
|
||||
auto & layer = model->layers[i];
|
||||
|
||||
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd + 2*kv_dim);
|
||||
layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd + 2*kv_dim);
|
||||
|
||||
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd); //TODO: 4*n_embd = config.n_inner
|
||||
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
|
||||
|
||||
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
// map by name
|
||||
model->tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g;
|
||||
model->tensors["model/h" + std::to_string(i) + "/ln_1/b"] = layer.ln_1_b;
|
||||
|
||||
model->tensors["model/h" + std::to_string(i) + "/ln_2/g"] = layer.ln_2_g;
|
||||
model->tensors["model/h" + std::to_string(i) + "/ln_2/b"] = layer.ln_2_b;
|
||||
|
||||
model->tensors["model/h" + std::to_string(i) + "/attn/c_attn/w"] = layer.c_attn_attn_w;
|
||||
model->tensors["model/h" + std::to_string(i) + "/attn/c_attn/b"] = layer.c_attn_attn_b;
|
||||
|
||||
model->tensors["model/h" + std::to_string(i) + "/attn/c_proj/w"] = layer.c_attn_proj_w;
|
||||
model->tensors["model/h" + std::to_string(i) + "/attn/c_proj/b"] = layer.c_attn_proj_b;
|
||||
|
||||
model->tensors["model/h" + std::to_string(i) + "/mlp/c_fc/w"] = layer.c_mlp_fc_w;
|
||||
model->tensors["model/h" + std::to_string(i) + "/mlp/c_fc/b"] = layer.c_mlp_fc_b;
|
||||
|
||||
model->tensors["model/h" + std::to_string(i) + "/mlp/c_proj/w"] = layer.c_mlp_proj_w;
|
||||
model->tensors["model/h" + std::to_string(i) + "/mlp/c_proj/b"] = layer.c_mlp_proj_b;
|
||||
}
|
||||
}
|
||||
|
||||
// key + value memory
|
||||
{
|
||||
const auto & hparams = model->hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
|
||||
const int n_mem = n_layer*n_ctx;
|
||||
const int n_elements = n_embd*n_mem;
|
||||
|
||||
model->memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
|
||||
model->memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
|
||||
|
||||
const size_t memory_size = ggml_nbytes(model->memory_k) + ggml_nbytes(model->memory_v);
|
||||
|
||||
printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
|
||||
}
|
||||
|
||||
// load weights
|
||||
{
|
||||
size_t total_size = 0;
|
||||
|
||||
bool has_lm_head = false;
|
||||
|
||||
while (true) {
|
||||
int32_t n_dims;
|
||||
int32_t length;
|
||||
int32_t ttype;
|
||||
|
||||
fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
|
||||
fin.read(reinterpret_cast<char *>(&length), sizeof(length));
|
||||
fin.read(reinterpret_cast<char *>(&ttype), sizeof(ttype));
|
||||
|
||||
if (fin.eof()) {
|
||||
break;
|
||||
}
|
||||
|
||||
int32_t nelements = 1;
|
||||
int32_t ne[2] = { 1, 1 };
|
||||
for (int i = 0; i < n_dims; ++i) {
|
||||
fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
|
||||
nelements *= ne[i];
|
||||
}
|
||||
|
||||
std::string name(length, 0);
|
||||
fin.read(&name[0], length);
|
||||
|
||||
if (model->tensors.find(name.data()) == model->tensors.end()) {
|
||||
fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
|
||||
auto tensor = model->tensors[name.data()];
|
||||
if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
|
||||
__func__, name.data(), (int) tensor->ne[0], (int) tensor->ne[1], ne[0], ne[1]);
|
||||
return false;
|
||||
}
|
||||
if (ggml_nelements(tensor) != nelements) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file. got %d, expected %d\n",
|
||||
__func__, name.data(), (int) ggml_nelements(tensor), nelements);
|
||||
return false;
|
||||
}
|
||||
|
||||
// for debugging
|
||||
if (0) {
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
|
||||
}
|
||||
|
||||
const size_t bpe = ggml_type_size(ggml_type(ttype));
|
||||
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
|
||||
return false;
|
||||
}
|
||||
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
|
||||
|
||||
// GPT-2 models share the WTE tensor as the LM head
|
||||
if (name == "model/wte" && has_lm_head == false) {
|
||||
memcpy(model->lm_head->data, tensor->data, ggml_nbytes(tensor));
|
||||
}
|
||||
|
||||
if (name == "model/lm_head") {
|
||||
has_lm_head = true;
|
||||
}
|
||||
|
||||
total_size += ggml_nbytes(tensor);
|
||||
}
|
||||
|
||||
printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0);
|
||||
}
|
||||
|
||||
fin.close();
|
||||
|
||||
|
||||
#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
|
||||
|
||||
if(config.n_gpu_layers > 0){
|
||||
size_t vram_total = 0;
|
||||
int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
|
||||
spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
|
||||
|
||||
|
||||
for(int i=0; i < gpu_layers; i++) {
|
||||
const auto & layer = model->layers[i];
|
||||
layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
|
||||
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
|
||||
|
||||
#if defined(GGML_USE_CLBLAST)
|
||||
ggml_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
|
||||
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
|
||||
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
|
||||
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
|
||||
#else
|
||||
ggml_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
|
||||
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
|
||||
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
|
||||
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
|
||||
#endif
|
||||
}
|
||||
|
||||
spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
|
||||
}
|
||||
|
||||
#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
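load_model above reads a simple sequential layout: a uint32 magic value, six int32 hyperparameters, the vocabulary as length-prefixed strings, then one record per tensor (dims, name length, type, shape, name, raw data). A hedged Python sketch of reading just the header and vocabulary, assuming little-endian fields:

# Hedged sketch of the file layout consumed by StarcoderModel::load_model above.
# Field order mirrors the fin.read() calls; little-endian packing is an assumption.
import struct

def read_starcoder_header(path):
    with open(path, "rb") as f:
        magic, = struct.unpack("<I", f.read(4))    # checked against GGML_FILE_MAGIC
        n_vocab, n_ctx, n_embd, n_head, n_layer, ftype = struct.unpack("<6i", f.read(24))
        vocab = {}
        count, = struct.unpack("<i", f.read(4))    # must match n_vocab
        for token_id in range(count):
            length, = struct.unpack("<I", f.read(4))
            vocab[token_id] = f.read(length).decode("utf-8", errors="replace")
        # per-tensor records (n_dims, name length, type, shape, name, data) follow
        return {"n_vocab": n_vocab, "n_ctx": n_ctx, "n_embd": n_embd,
                "n_head": n_head, "n_layer": n_layer, "ftype": ftype}, vocab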
std::stringstream StarcoderModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
|
||||
|
||||
std::stringstream result;
|
||||
// tokenize the prompt
|
||||
std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize((*vocab), prompt);
|
||||
|
||||
int n_past = 0;
|
||||
|
||||
int64_t t_sample_us = 0;
|
||||
int64_t t_predict_us = 0;
|
||||
|
||||
int n_predict = std::min(max_length, model->hparams.n_ctx - (int) embd_inp.size());
|
||||
|
||||
spdlog::debug("{}: number of tokens in prompt = {}", __func__, embd_inp.size());
|
||||
|
||||
std::vector<gpt_vocab::id> embd;
|
||||
|
||||
// determine the required inference memory per token:
|
||||
size_t mem_per_token = 0;
|
||||
|
||||
std::vector<float> logits;
|
||||
std::vector<gpt_vocab::id> test = {};
|
||||
|
||||
for(int i=0;i<64;i++){
|
||||
test.push_back(i);
|
||||
}
|
||||
|
||||
spdlog::debug("{}: calculate required memory per token", __func__);
|
||||
starcoder_eval((*model), config.n_threads, 0, test, logits, mem_per_token);
|
||||
spdlog::debug("{}: mem_per_token={}", __func__, mem_per_token);
|
||||
spdlog::debug("{}: total mem needed for prompt = {}*{}={}", __func__, embd_inp.size(), mem_per_token, embd_inp.size()*mem_per_token);
|
||||
|
||||
|
||||
for (int i = embd.size(); i < embd_inp.size() + n_predict; i++) {
|
||||
// predict
|
||||
spdlog::debug("{}: process token #{}: ", __func__, i);
|
||||
|
||||
if (embd.size() > 0) {
|
||||
const int64_t t_start_us = ggml_time_us();
|
||||
|
||||
if (!starcoder_eval((*model), config.n_threads, n_past, embd, logits, mem_per_token)) {
|
||||
throw std::runtime_error("Failed to predict");
|
||||
}
|
||||
|
||||
t_predict_us += ggml_time_us() - t_start_us;
|
||||
}
|
||||
|
||||
n_past += embd.size();
|
||||
embd.clear();
|
||||
|
||||
if (i >= embd_inp.size()) {
|
||||
// sample next token
|
||||
const int top_k = config.top_k;
|
||||
const float top_p = config.top_p;
|
||||
const float temp = config.temp;
|
||||
|
||||
const int n_vocab = model->hparams.n_vocab;
|
||||
|
||||
gpt_vocab::id id = 0;
|
||||
|
||||
{
|
||||
const int64_t t_start_sample_us = ggml_time_us();
|
||||
|
||||
id = gpt_sample_top_k_top_p((*vocab), logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
|
||||
|
||||
t_sample_us += ggml_time_us() - t_start_sample_us;
|
||||
}
|
||||
|
||||
// add it to the context
|
||||
embd.push_back(id);
|
||||
|
||||
if(id != 50256){
|
||||
result << vocab->id_to_token[id].c_str();
|
||||
}
|
||||
|
||||
} else {
|
||||
// if here, it means we are still processing the input prompt
|
||||
for (int k = i; k < embd_inp.size(); k++) {
|
||||
embd.push_back(embd_inp[k]);
|
||||
|
||||
if(include_prompt){
|
||||
result << vocab->id_to_token[embd_inp[k]].c_str();
|
||||
}
|
||||
|
||||
if (embd.size() > config.n_batch) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
i += embd.size() - 1;
|
||||
}
|
||||
|
||||
// end of text token
|
||||
if (embd.back() == 50256) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
spdlog::debug("{}: sample time = {:8.2f} ms\n", __func__, t_sample_us/1000.0f);
|
||||
spdlog::debug("{}: predict time = {:8.2f} ms / {:.2f} ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
|
||||
|
||||
|
||||
|
||||
return result;
|
||||
}
|
test_codegen2.py (new file, 65 lines)
@ -0,0 +1,65 @@
|
||||
#%%
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen2-1B")
|
||||
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen2-1B", trust_remote_code=True, revision="main")
|
||||
|
||||
|
||||
#%%
|
||||
model = model.to(device="cuda")
|
||||
|
||||
#%%
|
||||
text = """
|
||||
import os
|
||||
|
||||
def post_to_pastebin"""
|
||||
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
|
||||
generated_ids = model.generate(input_ids, max_length=512)
|
||||
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
|
||||
|
||||
# %%
|
||||
|
||||
def format_model_input(prefix, suffix):
|
||||
return prefix + "<mask_1>" + suffix + "<|endoftext|>" + "<sep>" + "<mask_1>"
|
||||
|
||||
|
||||
prefix = """
|
||||
import os
|
||||
|
||||
def post_to_pastebin"""
|
||||
suffix = "result = post_to_pastebin(content)"
|
||||
text = format_model_input(prefix, suffix)
|
||||
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
|
||||
generated_ids = model.generate(input_ids, max_length=128)
|
||||
print(tokenizer.decode(generated_ids[0], skip_special_tokens=False))
|
||||
# %%
|
||||
def main():
|
||||
text = """
|
||||
|
||||
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
|
||||
# %%
|
||||
|
||||
import os
|
||||
|
||||
def post_to_pastebin"""
|
||||
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
|
||||
generated_ids = model.generate(input_ids, max_length=512)
|
||||
|
||||
|
||||
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
|
||||
|
||||
# %%
|
||||
|
||||
def post_to_pastebin(content):
|
||||
input_ids = tokenizer(content, return_tensors="pt").to("cuda").input_ids
|
||||
generated_ids = model.generate(input_ids, max_length=512)
|
||||
return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
|
||||
|
||||
|
||||
|
||||
|
||||
|
test_santa.py (new file, 45 lines)
@ -0,0 +1,45 @@
|
||||
#%%
|
||||
import torch
|
||||
from transformers import CodeGenTokenizer, GPTJForCausalLM
|
||||
|
||||
|
||||
checkpoint = "/home/james/workspace/rafael-llm/codegen-2B-multi-gptj"
|
||||
device = "cuda" # for GPU usage or "cpu" for CPU usage
|
||||
|
||||
tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
|
||||
model = GPTJForCausalLM.from_pretrained(checkpoint).to(device)
|
||||
|
||||
|
||||
#model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)
|
||||
#%%
|
||||
|
||||
# define the user model
|
||||
class User:
|
||||
|
||||
|
||||
# %%
|
||||
code = """import os
|
||||
import requests
|
||||
|
||||
#send the json data to pastebin
|
||||
def send_data"""
|
||||
inputs = tokenizer.encode(code, return_tensors="pt").to(device)
|
||||
outputs = model.generate(inputs, max_length=200)
|
||||
response = tokenizer.decode(outputs[0])
|
||||
|
||||
print(response)
|
||||
|
||||
import requests
|
||||
|
||||
#send the json data to pastebin
|
||||
def send_data(data):
|
||||
url = "http://pastebin.com/api_post.php"
|
||||
data = {"api_dev_key": "<api_key>", "api_user_key": "<user_key>", "api_content": data}
|
||||
response = requests.post(url, data=data).text
|
||||
return response
|
||||
|
||||
|
||||
|
||||
# %%
|
||||
code
|
||||
# %%
|
turbopilot.code-workspace (new file, 94 lines)
@ -0,0 +1,94 @@
|
||||
{
|
||||
"folders": [
|
||||
{
|
||||
"path": "."
|
||||
},
|
||||
{
|
||||
"path": "extern/ggml"
|
||||
},
|
||||
{
|
||||
"path": "../../pymicrocosm"
|
||||
}
|
||||
],
|
||||
"settings": {
|
||||
"files.associations": {
|
||||
"array": "cpp",
|
||||
"atomic": "cpp",
|
||||
"bit": "cpp",
|
||||
"*.tcc": "cpp",
|
||||
"bitset": "cpp",
|
||||
"cctype": "cpp",
|
||||
"chrono": "cpp",
|
||||
"clocale": "cpp",
|
||||
"cmath": "cpp",
|
||||
"compare": "cpp",
|
||||
"concepts": "cpp",
|
||||
"cstdint": "cpp",
|
||||
"cstdio": "cpp",
|
||||
"cstdlib": "cpp",
|
||||
"cstring": "cpp",
|
||||
"ctime": "cpp",
|
||||
"cwchar": "cpp",
|
||||
"cwctype": "cpp",
|
||||
"deque": "cpp",
|
||||
"map": "cpp",
|
||||
"unordered_map": "cpp",
|
||||
"vector": "cpp",
|
||||
"exception": "cpp",
|
||||
"fstream": "cpp",
|
||||
"functional": "cpp",
|
||||
"initializer_list": "cpp",
|
||||
"iosfwd": "cpp",
|
||||
"istream": "cpp",
|
||||
"limits": "cpp",
|
||||
"memory": "cpp",
|
||||
"new": "cpp",
|
||||
"numbers": "cpp",
|
||||
"numeric": "cpp",
|
||||
"ostream": "cpp",
|
||||
"ratio": "cpp",
|
||||
"regex": "cpp",
|
||||
"semaphore": "cpp",
|
||||
"sstream": "cpp",
|
||||
"stdexcept": "cpp",
|
||||
"stop_token": "cpp",
|
||||
"streambuf": "cpp",
|
||||
"string": "cpp",
|
||||
"string_view": "cpp",
|
||||
"system_error": "cpp",
|
||||
"thread": "cpp",
|
||||
"type_traits": "cpp",
|
||||
"tuple": "cpp",
|
||||
"typeinfo": "cpp",
|
||||
"utility": "cpp",
|
||||
"csignal": "cpp",
|
||||
"cstdarg": "cpp",
|
||||
"cstddef": "cpp",
|
||||
"any": "cpp",
|
||||
"strstream": "cpp",
|
||||
"charconv": "cpp",
|
||||
"cinttypes": "cpp",
|
||||
"codecvt": "cpp",
|
||||
"complex": "cpp",
|
||||
"condition_variable": "cpp",
|
||||
"coroutine": "cpp",
|
||||
"list": "cpp",
|
||||
"set": "cpp",
|
||||
"algorithm": "cpp",
|
||||
"iterator": "cpp",
|
||||
"memory_resource": "cpp",
|
||||
"optional": "cpp",
|
||||
"random": "cpp",
|
||||
"source_location": "cpp",
|
||||
"future": "cpp",
|
||||
"iomanip": "cpp",
|
||||
"iostream": "cpp",
|
||||
"mutex": "cpp",
|
||||
"span": "cpp",
|
||||
"cfenv": "cpp",
|
||||
"typeindex": "cpp",
|
||||
"variant": "cpp",
|
||||
"unordered_set": "cpp"
|
||||
}
|
||||
}
|
||||
}
|