Mirror of https://github.com/ravenscroftj/turbopilot.git, synced 2024-09-28 19:56:07 +00:00

Compare commits: c164deb042...a00de2a332 (64 commits)
Commit SHAs in this range (author and date columns were not preserved by the mirror):

a00de2a332  215a69b5af  91639b8fc0  0b408510f4  604183380d  88683abe50  326e76c9bb  23c0a3d19e
31bb33c731  d4989b543c  e9dc6a304a  97a0377cd6  6d26c9b064  63b554793d  0cf7a9c341  356a83c5fd
a5517b0fcd  8fa70e1518  b79ab46b50  4a47251822  b2b4a1480f  5f7155a314  77cde95cb9  bea7ebdb34
812bbea9d7  08e8834390  25680e64d8  dca25d8456  1f6f84a783  0183b30502  b465eae818  e8adff5339
f12dacaa15  0b0b914f92  b21dd0799d  c73c196364  30834e3121  39c3182a3a  2abdcabf02  6877542ad8
5b561f7b7e  e85492d8ba  f840ea0b73  3e37c4bb7c  20b1460bd8  2308b9ae21  c4e57e0aab  dc81abbc52
ae2d505a2f  143155dac3  596c835939  6b0a25cb71  227501188c  f69a8f65d4  22f2993db4  e8beac34e7
ccf425f019  cceee41f79  113544400a  f0627cd567  2d617b458e  0c1fc1a04e  e0adf0519b  ef1402a1a8
.github/workflows/build-commit.yml (vendored, 6 lines changed)

@ -25,7 +25,6 @@ jobs:
      - name: Dependencies
        id: depends
        run: |
          brew update
          brew install cmake boost asio
      - name: Build
        id: make_build
@ -92,7 +91,7 @@ jobs:
          submodules: true

      - name: Install Dependencies
-       run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev
+       run: sudo apt-get update && sudo apt-get install -yq libboost-dev libasio-dev libboost-thread-dev

      - name: Install OpenBlas
        if: ${{ matrix.build == 'avx2-openblas' }}
@ -207,6 +206,7 @@ jobs:
          $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim()))
          $lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe')
          & $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll

      - name: Build
        id: cmake_build
        env:
@ -214,7 +214,7 @@ jobs:
        run: |
          mkdir build
          cd build
-         cmake .. ${{ matrix.defines }}
+         cmake .. ${{ matrix.defines }} -DBoost_LIBRARYDIRS=${{ steps.install-boost.outputs.BOOST_ROOT }}/lib
          cmake --build . --config Release --target turbopilot

      # - name: Add libopenblas.dll
.github/workflows/docker-image.yml (vendored, 67 lines changed)

@ -16,9 +16,52 @@ jobs:
    strategy:
      matrix:
        config:
-         - {tag: "", dockerfile: "./Dockerfile.default", platforms: "linux/amd64,linux/arm64"}
-         - {tag: "-cuda11", dockerfile: "./Dockerfile.cuda11", platforms: "linux/amd64"}
-         - {tag: "-cuda12", dockerfile: "./Dockerfile.cuda12", platforms: "linux/amd64"}
+         - tag:
+           dockerfile: ./Dockerfile.default
+           platforms: linux/amd64,linux/arm64
+           build_base: ubuntu:22.04
+           runtime_base: ubuntu:22.04
+
+         - tag: -openblas
+           dockerfile: ./Dockerfile.default
+           platforms: linux/amd64,linux/arm64
+           build_base: ubuntu:22.04
+           runtime_base: ubuntu:22.04
+           extra_deps: libopenblas-dev
+           cmake_args: -DGGML_OPENBLAS=On
+
+         - tag: -cuda11-7
+           dockerfile: ./Dockerfile.default
+           platforms: linux/amd64
+           build_base: nvidia/cuda:11.7.1-devel-ubuntu22.04
+           runtime_base: nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04
+           cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
+
+         - tag: -cuda12-0
+           dockerfile: ./Dockerfile.default
+           platforms: linux/amd64
+           build_base: nvidia/cuda:12.0.0-devel-ubuntu22.04
+           runtime_base: nvidia/cuda:12.0.0-runtime-ubuntu22.04
+           cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
+
+         - tag: -cuda12-2
+           dockerfile: ./Dockerfile.default
+           platforms: linux/amd64
+           build_base: nvidia/cuda:12.2.0-devel-ubuntu22.04
+           runtime_base: nvidia/cuda:12.2.0-runtime-ubuntu22.04
+           cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
+
+         # - tag: -clblast
+         #   dockerfile: ./Dockerfile.default
+         #   platforms: linux/amd64
+         #   build_base: ubuntu:22.04
+         #   runtime_base: ubuntu:22.04
+         #   runtime_deps: libclblast1
+         #   extra_deps: libclblast-dev
+         #   cmake_args: -DGGML_CLBLAST=On

    steps:

@ -45,7 +88,7 @@ jobs:
          password: ${{ secrets.GH_TOKEN }}

      - name: Build and push incremental
-       uses: docker/build-push-action@v4
+       uses: docker/build-push-action@v4.1.1
        if: (!startsWith(github.ref, 'refs/tags/'))
        with:
          file: ${{matrix.config.dockerfile}}
@ -53,6 +96,12 @@ jobs:
          tags: ghcr.io/ravenscroftj/turbopilot:nightly${{matrix.config.tag}}-${{ github.sha }}
          context: ${{github.workspace}}
          platforms: ${{matrix.config.platforms}}
+         build-args: |
+           EXTRA_DEPS=${{matrix.config.extra_deps}}
+           CMAKE_ARGS=${{matrix.config.cmake_args}}
+           BUILD_BASE=${{matrix.config.build_base}}
+           RUNTIME_BASE=${{matrix.config.runtime_base}}
+           RUNTIME_DEPS=${{matrix.config.runtime_deps}}

      - name: Build and push release (Main Latest Build)
@ -64,6 +113,11 @@ jobs:
          tags: ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}, ghcr.io/ravenscroftj/turbopilot:latest
          context: ${{github.workspace}}
          platforms: ${{matrix.config.platforms}}
+         build-args: |
+           EXTRA_DEPS="${{matrix.config.extra_deps}}"
+           CMAKE_ARGS="${{matrix.config.cmake_args}}"
+           BUILD_BASE="${{matrix.config.build_base}}"
+           RUNTIME_BASE="${{matrix.config.runtime_base}}"

      - name: Build and push release (Accelerated Builds)
@ -75,3 +129,8 @@ jobs:
          tags: ghcr.io/ravenscroftj/turbopilot:${{ github.ref_name }}${{matrix.config.tag}}
          context: ${{github.workspace}}
          platforms: ${{matrix.config.platforms}}
+         build-args: |
+           EXTRA_DEPS="${{matrix.config.extra_deps}}"
+           CMAKE_ARGS="${{matrix.config.cmake_args}}"
+           BUILD_BASE="${{matrix.config.build_base}}"
+           RUNTIME_BASE="${{matrix.config.runtime_base}}"
.gitignore (vendored, new file)

@ -0,0 +1,2 @@
build/
models/
.gitmodules (vendored, 5 lines changed)

@ -1,9 +1,12 @@
 [submodule "ggml"]
   path = extern/ggml
-  url = git@github.com:ravenscroftj/ggml.git
+  url = git@github.com:ggerganov/ggml.git
 [submodule "extern/argparse"]
   path = extern/argparse
   url = https://github.com/p-ranav/argparse.git
 [submodule "extern/sbdlog"]
   path = extern/spdlog
   url = https://github.com/gabime/spdlog.git
+[submodule "extern/ggml"]
+  path = extern/ggml
+  url = https://github.com/ggerganov/ggml
.vscode/c_cpp_properties.json (vendored, new file)

@ -0,0 +1,20 @@
{
    "configurations": [
        {
            "name": "Linux",
            "includePath": [
                "${workspaceFolder}/**",
                "${workspaceFolder}/extern/crow/include",
                "${workspaceFolder}/include",
                "${workspaceFolder}/include"
            ],
            "defines": [],
            "compilerPath": "/usr/bin/gcc",
            "cStandard": "c17",
            "cppStandard": "gnu++17",
            "intelliSenseMode": "linux-gcc-x64",
            "configurationProvider": "ms-vscode.cmake-tools"
        }
    ],
    "version": 4
}
.vscode/launch.json (vendored, new file)

@ -0,0 +1,70 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "(gdb) Launch TBP",
            "type": "cppdbg",
            "request": "launch",
            "program": "/home/james/workspace/rafael-llm/turbopilot/build/bin/turbopilot",
            "args": [
                //TBP ARGS
                "-v",
                "-f",
                "/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
                "-m",
                "replit",
            ],
            "stopAtEntry": false,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": false,
            "MIMode": "gdb",
            "setupCommands": [
                {
                    "description": "Enable pretty-printing for gdb",
                    "text": "-enable-pretty-printing",
                    "ignoreFailures": true
                },
                {
                    "description": "Set Disassembly Flavor to Intel",
                    "text": "-gdb-set disassembly-flavor intel",
                    "ignoreFailures": true
                }
            ]
        },
        {
            "name": "(gdb) Launch Replut",
            "type": "cppdbg",
            "request": "launch",
            "program": "/home/james/workspace/rafael-llm/turbopilot/extern/ggml/build/bin/replit",
            "args": [
                // REPLIT ARGS
                "-m",
                "/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
                "-f",
                "/home/james/workspace/rafael-llm/turbopilot/test.txt"
            ],
            "stopAtEntry": false,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": false,
            "MIMode": "gdb",
            "setupCommands": [
                {
                    "description": "Enable pretty-printing for gdb",
                    "text": "-enable-pretty-printing",
                    "ignoreFailures": true
                },
                {
                    "description": "Set Disassembly Flavor to Intel",
                    "text": "-gdb-set disassembly-flavor intel",
                    "ignoreFailures": true
                }
            ]
        },

    ]
}
.vscode/tasks.json (vendored, new file)

@ -0,0 +1,28 @@
{
    "tasks": [
        {
            "type": "cppbuild",
            "label": "C/C++: g++ build active file",
            "command": "/usr/bin/g++",
            "args": [
                "-fdiagnostics-color=always",
                "-g",
                "${file}",
                "-o",
                "${fileDirname}/${fileBasenameNoExtension}"
            ],
            "options": {
                "cwd": "${fileDirname}"
            },
            "problemMatcher": [
                "$gcc"
            ],
            "group": {
                "kind": "build",
                "isDefault": true
            },
            "detail": "Task generated by Debugger."
        }
    ],
    "version": "2.0.0"
}
CMakeLists.txt (filename inferred from the project() line; the mirror dropped the header)

@ -1,5 +1,15 @@
 cmake_minimum_required (VERSION 3.0)
 project(turbopilot VERSION 0.1.0)
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED true)
+set(CMAKE_C_STANDARD 11)
+set(CMAKE_C_STANDARD_REQUIRED true)
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
+
+# option(BUILD_SHARED_LIBS "Build using shared libraries" OFF)
+
 set(CMAKE_EXPORT_COMPILE_COMMANDS "on")
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
@ -32,6 +42,9 @@ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES
 endif()
+
+
+
 add_subdirectory(extern/ggml)
 add_subdirectory(extern/argparse)
 add_subdirectory(extern/spdlog)
Dockerfile.cuda11 (deleted; filename inferred from the old workflow matrix above)

@ -1,39 +0,0 @@
FROM nvidia/cuda:11.7.1-devel-ubuntu22.04 AS build

ENV DEBIAN_FRONTEND=noninteractive

# inlude kitware apt repo to allow us to grab latest cmake
RUN apt-get update && apt-get install ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null

RUN apt-get update && apt-get install -y build-essential cmake libboost-dev


ADD ./ /turbopilot

RUN mkdir /turbopilot/build

WORKDIR /turbopilot/build

RUN cmake -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc ..
RUN make turbopilot

FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu22.04 AS runtime


WORKDIR /app

COPY --from=build /turbopilot/build/bin/turbopilot /app/turbopilot

ENV THREADS=4

ENV MODEL="/models/codegen-2B-multi-ggml-4bit-quant.bin"

ENV BATCHSIZE=64

COPY ./run.sh /app/

EXPOSE 18080

CMD /app/run.sh
Dockerfile.cuda12 (deleted; filename inferred from the old workflow matrix above)

@ -1,37 +0,0 @@
FROM nvidia/cuda:12.2.0-devel-ubuntu20.04 AS build

ENV DEBIAN_FRONTEND=noninteractive

# inlude kitware apt repo to allow us to grab latest cmake
RUN apt-get update && apt-get install ca-certificates gpg wget
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ focal main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null

RUN apt-get update && apt-get install -y build-essential cmake libboost-dev

ADD ./ /turbopilot

RUN mkdir /turbopilot/build

WORKDIR /turbopilot/build

RUN cmake -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc ..
RUN make turbopilot

FROM nvidia/cuda:12.2.0-runtime-ubuntu20.04 AS runtime

WORKDIR /app

COPY --from=build /turbopilot/build/bin/turbopilot /app/turbopilot

ENV THREADS=4

ENV MODEL="/models/codegen-2B-multi-ggml-4bit-quant.bin"

ENV BATCHSIZE=64

COPY ./run.sh /app/

EXPOSE 18080

CMD /app/run.sh
Dockerfile.default (filename inferred from the workflow matrix above)

@ -1,17 +1,37 @@
-FROM alpine AS build
+ARG BUILD_BASE="ubuntu:22.04"
+ARG RUNTIME_BASE="ubuntu:22.04"
 
-RUN apk add --update alpine-sdk boost-dev cmake asio-dev
+FROM ${BUILD_BASE} AS build
 
-ADD ./ /turbopilot/
+ARG EXTRA_DEPS=""
+ARG CMAKE_ARGS=""
+
+RUN echo "CMAKE_ARGS: ${CMAKE_ARGS}"
+RUN echo "EXTRA_DEPS: ${EXTRA_DEPS}"
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# inlude kitware apt repo to allow us to grab latest cmake
+RUN apt-get update && apt-get install -y ca-certificates gpg wget
+RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null
+RUN echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ jammy main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null
+
+RUN apt-get update && apt-get install -y build-essential cmake libboost-dev libboost-thread-dev ${EXTRA_DEPS}
+
+ADD ./ /turbopilot
 
 RUN mkdir /turbopilot/build
 
 WORKDIR /turbopilot/build
 
-RUN cmake -D GGML_STATIC=ON ..
+RUN cmake .. ${CMAKE_ARGS}
 RUN make turbopilot
 
-FROM alpine AS runtime
+FROM ${RUNTIME_BASE} AS runtime
+
+ARG RUNTIME_DEPS=""
+
+RUN if [[ -z "${RUNTIME_DEPS}" ]] ; then echo "No runtime libs required" ; else apt-get update && apt-get install -y ${RUNTIME_DEPS} ; fi
 
 WORKDIR /app
MODELS.md (filename inferred from the content; the mirror dropped the header)

@ -6,9 +6,9 @@
 | Model Name | RAM Requirement | Direct Download | HF Project Link |
 |---------------------|-----------------|-----------------|-----------------|
-| StarCoder | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/blob/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
+| StableCode | ~3GiB | [:arrow_down:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/resolve/main/stablecode-instruct-alpha-3b.ggmlv1.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/stablecode-instruct-alpha-3b-GGML/) |
 
 To run in Turbopilot set model type `-m stablecode`
 
 ## "Coder" family models
 
@ -23,7 +23,7 @@ This model is primarily trained on Python, Java and Javscript.
 | Model Name | RAM Requirement | Direct Download | HF Project Link |
 |---------------------|-----------------|-----------------|-----------------|
-| StarCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
+| SantaCoder | ~2GiB | [:arrow_down:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/resolve/main/santacoder-q4_0.bin) | [:hugs:](https://huggingface.co/mike-ravkine/gpt_bigcode-santacoder-GGML/) |
 
 To run in Turbopilot set model type `-m starcoder`
 
@ -39,7 +39,7 @@ Even when quantized, WizardCoder is a large model that takes up a significant am
 |---------------------|-----------------|-----------------|-----------------|
 | WizardCoder | ~12GiB | [:arrow_down:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/resolve/main/WizardCoder-15B-1.0.ggmlv3.q4_0.bin) | [:hugs:](https://huggingface.co/TheBloke/WizardCoder-15B-1.0-GGML/) |
 
-To run in Turbopilot set model type `-m starcoder`
+To run in Turbopilot set model type `-m wizardcoder`
 
 
 ### StarCoder (Released 4/5/2023)
extern/ggml (vendored submodule)

@ -1 +1 @@
-Subproject commit f6365c0605ac86c6ab106cda0e8d6650e54097a7
+Subproject commit 1a5d5f331de1d3c7ace40d86fe2373021a42f9ce
include/turbopilot/gptj.hpp (filename inferred from the class; the mirror dropped the header)

@ -71,7 +71,7 @@ public:
     }
     virtual ~GPTJModel();
     bool load_model(std::string path);
-    virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+    virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
 
 private:
     gptj_model *model = NULL;
include/turbopilot/gptneox.hpp (filename inferred from the class; the mirror dropped the header)

@ -75,7 +75,7 @@ public:
     }
     virtual ~GPTNEOXModel();
     bool load_model(std::string path);
-    virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+    virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
 
 private:
     gpt_neox_model *model = NULL;
include/turbopilot/model.hpp (filename inferred from the content; the mirror dropped the header)

@ -7,6 +7,7 @@
 #include <map>
 #include <vector>
 #include <random>
+#include <mutex>
 
 typedef void (*offload_func_t)(struct ggml_tensor * tensor);
 void ggml_nop(struct ggml_tensor * tensor);
@ -55,11 +56,16 @@ public:
         rng(rng)
     {}
     virtual bool load_model(std::string model_path) = 0;
-    virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt) = 0;
+    std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+    void lock();
+    void unlock();
 
 protected:
+    virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt) = 0;
     ModelConfig config;
     std::mt19937 &rng;
+    std::mutex model_lock;
 };
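The model.hpp change above is a template-method guard: predict() is now a plain public method that takes model_lock and forwards to the subclasses' virtual predict_impl() (the implementation lands in src/common.cpp further down), so the multithreaded Crow server runs at most one inference at a time. A minimal standalone sketch of that behaviour; DemoModel is a hypothetical stand-in for TurbopilotModel, and std::lock_guard stands in for the explicit lock()/unlock() pair used by the real code:

#include <iostream>
#include <mutex>
#include <sstream>
#include <string>
#include <thread>

// Hypothetical stand-in for TurbopilotModel.
struct DemoModel {
    std::mutex model_lock;

    // Non-virtual wrapper: serialises every caller on model_lock.
    std::stringstream predict(const std::string &prompt) {
        std::lock_guard<std::mutex> guard(model_lock); // same effect as lock()/unlock()
        return predict_impl(prompt);
    }

    // Stands in for the protected virtual predict_impl() that subclasses override.
    std::stringstream predict_impl(const std::string &prompt) {
        std::stringstream out;
        out << "completion for: " << prompt;
        return out;
    }
};

int main() {
    DemoModel model;
    // Two handler threads hitting the same model: the second call blocks
    // until the first releases model_lock, so inference never overlaps.
    std::thread a([&] { std::cout << model.predict("foo").str() << std::endl; });
    std::thread b([&] { std::cout << model.predict("bar").str() << std::endl; });
    a.join();
    b.join();
}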
include/turbopilot/server.hpp (filename inferred from the include guard; the mirror dropped the header)

@ -2,6 +2,8 @@
 #define __TURBOPILOT_SERVER_H
 
+#include <spdlog/spdlog.h>
+
 #include "turbopilot/model.hpp"
 
 #include "crow_all.h"
@ -10,6 +12,46 @@ crow::response handle_openai_request(TurbopilotModel *model, const crow::request
 
 crow::response handle_hf_request(TurbopilotModel *model, const crow::request& req);
 
+class TBPLogger : public crow::ILogHandler {
+public:
+    TBPLogger() {}
+    void log(std::string message, crow::LogLevel crow_level) {
+        // "message" doesn't contain the timestamp and loglevel
+        // prefix the default logger does and it doesn't end
+        // in a newline.
+
+        spdlog::level::level_enum level = spdlog::level::info;
+
+        switch(crow_level){
+            case crow::LogLevel::Critical:
+                level = spdlog::level::critical;
+                break;
+
+            case crow::LogLevel::Error:
+                level = spdlog::level::err;
+                break;
+
+            case crow::LogLevel::Warning:
+                level = spdlog::level::warn;
+                break;
+
+            case crow::LogLevel::Info:
+                level = spdlog::level::info;
+                break;
+
+            case crow::LogLevel::Debug:
+                level = spdlog::level::debug;
+                break;
+
+            default:
+                // if case is not a known value, assume the worst
+                level = spdlog::level::critical;
+        }
+
+        spdlog::log(level, message);
+    }
+};
+
 #endif // __TURBOPILOT_SERVER_H
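TBPLogger above bridges Crow's logging into spdlog by mapping each crow::LogLevel onto the nearest spdlog level (unknown values are treated as critical). A minimal sketch of how it gets wired into a server, mirroring the src/main.cpp hunk further down; the port and the global log level chosen here are illustrative assumptions:

#include <spdlog/spdlog.h>

#include "turbopilot/server.hpp" // brings in crow_all.h and TBPLogger

int main() {
    crow::SimpleApp app;

    TBPLogger logger;
    crow::logger::setHandler(&logger);       // crow log lines now flow through spdlog
    spdlog::set_level(spdlog::level::debug); // assumed verbosity, pick what you need

    CROW_ROUTE(app, "/")([](){ return "Hello world"; });
    app.port(18080).run();                   // 18080 matches the port the Dockerfiles expose
}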
include/turbopilot/starcoder.hpp (filename inferred from the class; the mirror dropped the header)

@ -68,7 +68,7 @@ public:
     }
     virtual ~StarcoderModel();
     bool load_model(std::string path);
-    virtual std::stringstream predict(std::string prompt, int max_length, bool include_prompt);
+    virtual std::stringstream predict_impl(std::string prompt, int max_length, bool include_prompt);
 
 private:
     starcoder_model *model = NULL;
run.sh (7 lines changed)

@ -1,3 +1,6 @@
 #!/bin/sh
 
-/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
+if [ -z "$GPU_LAYERS" ]; then
+  /app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
+else
+  /app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL --ngl $GPU_LAYERS
+fi
src/CMakeLists.txt (filename inferred from the content; the mirror dropped the header)

@ -1,6 +1,7 @@
 set(TURBOPILOT_TARGET turbopilot)
 
-find_package(Boost REQUIRED)
+find_package(Boost COMPONENTS thread system REQUIRED)
 
 include_directories(${Boost_INCLUDE_DIRS})
 
 add_executable(${TURBOPILOT_TARGET}
@ -16,6 +17,9 @@ add_executable(${TURBOPILOT_TARGET}
     ../include/turbopilot/starcoder.hpp
 )
 
+#set(THREADS_PREFER_PTHREAD_FLAG TRUE)
+#find_package(Threads REQUIRED)
+
 
 target_include_directories(${TURBOPILOT_TARGET} PRIVATE
     ../include
@ -23,8 +27,6 @@ target_include_directories(${TURBOPILOT_TARGET} PRIVATE
     ../extern/crow/include
 )
 
-#target_compile_features(${TURBOPILOT_TARGET} PRIVATE cxx_std_11)
-
-#target_link_libraries(${TURBOPILOT_TARGET} PRIVATE spdlog::spdlog_header_only)
-
 target_link_libraries(${TURBOPILOT_TARGET} PRIVATE ggml argparse)
src/common.cpp (filename inferred from the content; the mirror dropped the header)

@ -4,6 +4,22 @@
 #include <cmath>
 #include <random>
 
+void TurbopilotModel::lock(){
+    this->model_lock.lock();
+}
+
+void TurbopilotModel::unlock(){
+    this->model_lock.unlock();
+}
+
+std::stringstream TurbopilotModel::predict(std::string prompt, int max_length, bool include_prompt){
+    lock();
+    auto result = predict_impl(prompt, max_length, include_prompt);
+    unlock();
+    return result;
+}
+
 void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
     (void) tensor;
 }
@ -163,4 +179,6 @@ gpt_vocab::id gpt_sample_top_k_top_p(
     int idx = dist(rng);
 
     return logits_id[idx].second;
 }
+
+
src/gptj.cpp (54 lines changed)

@ -6,6 +6,14 @@
 #include <iostream>
 #include <fstream>
 
+
+#ifdef GGML_USE_CLBLAST
+#include "ggml-opencl.h"
+#endif
+#ifdef GGML_USE_CUBLAS
+#include "ggml-cuda.h"
+#endif
+
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
@ -455,6 +463,9 @@ bool GPTJModel::load_model(std::string fname) {
         }
     }
 
+
+
+
     // key + value memory
     {
         const auto & hparams = model->hparams;
@ -553,10 +564,51 @@ bool GPTJModel::load_model(std::string fname) {
 
     fin.close();
 
+
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
+    if(config.n_gpu_layers > 0){
+        size_t vram_total = 0;
+        int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
+        spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
+
+        for(int i=0; i < gpu_layers; i++) {
+            const auto & layer = model->layers[i];
+            layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_attn_k_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_attn_v_proj_w->backend = GGML_BACKEND_GPU;
+
+            layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
+
+#if defined(GGML_USE_CLBLAST)
+            ggml_cl_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
+            ggml_cl_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
+            ggml_cl_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
+            ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#else
+            ggml_cuda_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
+            ggml_cuda_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
+            ggml_cuda_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
+            ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#endif
+        }
+
+        spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
+    }
+
+#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
     return true;
 }
 
-std::stringstream GPTJModel::predict(std::string prompt, int max_length, bool include_prompt) {
+std::stringstream GPTJModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
 
     std::stringstream result;
     // tokenize the prompt
src/gptneox.cpp (filename inferred from the content; the mirror dropped the header)

@ -99,6 +99,7 @@ bool gpt_neox_eval(
         const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
         //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
+
         // reallocate
         buf_size = buf_size_new;
         buf = realloc(buf, buf_size);
@ -106,6 +107,8 @@ bool gpt_neox_eval(
             fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
             return false;
         }
+
+        spdlog::debug("{}: reallocating context buffer {} -> now {} bytes of tokens in prompt = {}", __func__, buf_size, buf_size_new);
     }
 
     struct ggml_init_params params = {
@ -291,6 +294,7 @@ bool gpt_neox_eval(
     // ggml_graph_print (&gf);
     // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
     //}
 
+
     //embd_w.resize(n_vocab*N);
     //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
@ -301,7 +305,9 @@ bool gpt_neox_eval(
 
     if (mem_per_token == 0) {
         mem_per_token = ggml_used_mem(ctx0)/N;
+
     }
+    spdlog::debug("used_mem = {}\n", ggml_used_mem(ctx0));
     //printf("used_mem = %zu\n", ggml_used_mem(ctx0));
 
     ggml_free(ctx0);
@ -618,8 +624,6 @@ bool GPTNEOXModel::load_model(std::string fname) {
 
 #if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
 
-    printf("inside ggml clblast check\n");
-
 
     if(config.n_gpu_layers > 0){
         size_t vram_total = 0;
@ -647,7 +651,7 @@ bool GPTNEOXModel::load_model(std::string fname) {
 #endif
         }
 
-        fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+        spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
     }
 
 #endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
@ -655,7 +659,7 @@ bool GPTNEOXModel::load_model(std::string fname) {
     return true;
 }
 
-std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool include_prompt) {
+std::stringstream GPTNEOXModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
 
     std::stringstream result;
     // tokenize the prompt
@ -674,6 +678,8 @@ std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool
 
     std::vector<gpt_vocab::id> embd;
 
+
+
     // determine the required inference memory per token:
     size_t mem_per_token = 0;
@ -760,3 +766,4 @@ std::stringstream GPTNEOXModel::predict(std::string prompt, int max_length, bool
 
     return result;
 }
+
src/main.cpp (14 lines changed)

@ -67,6 +67,11 @@ int main(int argc, char **argv)
         .default_value(0.1f)
         .scan<'g', float>();
 
+    program.add_argument("-b", "--batch-size")
+        .help("set batch size for model completion")
+        .default_value(512)
+        .scan<'i',int>();
+
 
     program.add_argument("prompt").remaining();
 
@ -103,6 +108,7 @@ int main(int argc, char **argv)
     config.n_threads = program.get<int>("--threads");
     config.temp = program.get<float>("--temperature");
     config.top_p = program.get<float>("--top-p");
+    config.n_batch = program.get<int>("--batch-size");
     config.n_gpu_layers = program.get<int>("--ngl");
 
     if(model_type.compare("codegen") == 0) {
@ -129,16 +135,22 @@ int main(int argc, char **argv)
     }
 
     t_load_us = ggml_time_us() - t_start_us;
 
     spdlog::info("Loaded model in {:0.2f}ms", t_load_us/1000.0f);
 
     crow::SimpleApp app;
 
+    TBPLogger logger;
+
+    crow::logger::setHandler(&logger);
+
     CROW_ROUTE(app, "/")([](){
         return "Hello world";
     });
 
 
     CROW_ROUTE(app, "/copilot_internal/v2/token")([](){
         //return "Hello world";
 
@ -176,6 +188,8 @@ int main(int argc, char **argv)
 
     app.port(program.get<int>("--port")).multithreaded().run();
 
+
+
     free(model);
 }
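The new --batch-size flag is declared with p-ranav/argparse, which the repo already vendors under extern/argparse. A standalone sketch of just that flag, reusing the exact definition from the hunk above (the program name is a hypothetical placeholder):

#include <argparse/argparse.hpp>

#include <exception>
#include <iostream>

int main(int argc, char **argv) {
    argparse::ArgumentParser program("batch-size-demo"); // hypothetical name

    // Same declaration as the src/main.cpp hunk above.
    program.add_argument("-b", "--batch-size")
        .help("set batch size for model completion")
        .default_value(512)
        .scan<'i', int>();

    try {
        program.parse_args(argc, argv);
    } catch (const std::exception &err) {
        std::cerr << err.what() << std::endl;
        return 1;
    }

    // In the real code this value is copied into ModelConfig::n_batch.
    std::cout << "batch size: " << program.get<int>("--batch-size") << std::endl;
    return 0;
}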
src/server.cpp (filename inferred from the content; the mirror dropped the header)

@ -37,7 +37,6 @@ crow::response handle_hf_request(TurbopilotModel *model, const crow::request& re
     crow::json::wvalue response = {
         {"generated_text", result.str()},
     };
 
-
     crow::response res;
src/starcoder.cpp (filename inferred from the content; the mirror dropped the header)

@ -50,13 +50,13 @@ bool starcoder_eval(
 
     if (mem_per_token > 0 && mem_per_token*N > buf_size) {
         const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
-        //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
+        spdlog::debug("{}: reallocating buffer from {} to {} bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
         buf_size = buf_size_new;
         buf = realloc(buf, buf_size);
         if (buf == nullptr) {
-            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
+            spdlog::error("{}: failed to allocate {} bytes\n", __func__, buf_size);
             return false;
         }
     }
@ -686,8 +686,6 @@ bool StarcoderModel::load_model(std::string fname) {
 
 #if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
 
-    printf("inside ggml clblast check\n");
-
     if(config.n_gpu_layers > 0){
         size_t vram_total = 0;
         int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
@ -714,7 +712,7 @@ bool StarcoderModel::load_model(std::string fname) {
 #endif
         }
 
-        fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+        spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
     }
 
 #endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
@ -724,7 +722,7 @@ bool StarcoderModel::load_model(std::string fname) {
 }
 
 
-std::stringstream StarcoderModel::predict(std::string prompt, int max_length, bool include_prompt) {
+std::stringstream StarcoderModel::predict_impl(std::string prompt, int max_length, bool include_prompt) {
 
     std::stringstream result;
     // tokenize the prompt
test_codegen2.py (new file)

@ -0,0 +1,65 @@
#%%
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen2-1B")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen2-1B", trust_remote_code=True, revision="main")


#%%
model = model.to(device="cuda")

#%%
text = """
import os

def post_to_pastebin"""
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=512)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))

# %%

def format_model_input(prefix, suffix):
    return prefix + "<mask_1>" + suffix + "<|endoftext|>" + "<sep>" + "<mask_1>"


prefix = """
import os

def post_to_pastebin"""
suffix = "result = post_to_pastebin(content)"
text = format_model_input(prefix, suffix)
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=False))
# %%
def main():
    text = """

print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))

if __name__ == '__main__':
    main()

print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
# %%

import os

def post_to_pastebin"""
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=512)


print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))

# %%

def post_to_pastebin(content):
    input_ids = tokenizer(content, return_tensors="pt").to("cuda").input_ids
    generated_ids = model.generate(input_ids, max_length=512)
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
test_santa.py
Normal file
45
test_santa.py
Normal file
@ -0,0 +1,45 @@
|
||||
#%%
|
||||
import torch
|
||||
from transformers import CodeGenTokenizer, GPTJForCausalLM
|
||||
|
||||
|
||||
checkpoint = "/home/james/workspace/rafael-llm/codegen-2B-multi-gptj"
|
||||
device = "cuda" # for GPU usage or "cpu" for CPU usage
|
||||
|
||||
tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
|
||||
model = GPTJForCausalLM.from_pretrained(checkpoint).to(device)
|
||||
|
||||
|
||||
#model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)
|
||||
#%%
|
||||
|
||||
# define the user model
|
||||
class User:
|
||||
|
||||
|
||||
# %%
|
||||
code = """import os
|
||||
import requests
|
||||
|
||||
#send the json data to pastebin
|
||||
def send_data"""
|
||||
inputs = tokenizer.encode(code, return_tensors="pt").to(device)
|
||||
outputs = model.generate(inputs, max_length=200)
|
||||
response = tokenizer.decode(outputs[0])
|
||||
|
||||
print(response)
|
||||
|
||||
import requests
|
||||
|
||||
#send the json data to pastebin
|
||||
def send_data(data):
|
||||
url = "http://pastebin.com/api_post.php"
|
||||
data = {"api_dev_key": "<api_key>", "api_user_key": "<user_key>", "api_content": data}
|
||||
response = requests.post(url, data=data).text
|
||||
return response
|
||||
|
||||
|
||||
|
||||
# %%
|
||||
code
|
||||
# %%
|
turbopilot.code-workspace (new file)

@ -0,0 +1,94 @@
{
    "folders": [
        {
            "path": "."
        },
        {
            "path": "extern/ggml"
        },
        {
            "path": "../../pymicrocosm"
        }
    ],
    "settings": {
        "files.associations": {
            "array": "cpp",
            "atomic": "cpp",
            "bit": "cpp",
            "*.tcc": "cpp",
            "bitset": "cpp",
            "cctype": "cpp",
            "chrono": "cpp",
            "clocale": "cpp",
            "cmath": "cpp",
            "compare": "cpp",
            "concepts": "cpp",
            "cstdint": "cpp",
            "cstdio": "cpp",
            "cstdlib": "cpp",
            "cstring": "cpp",
            "ctime": "cpp",
            "cwchar": "cpp",
            "cwctype": "cpp",
            "deque": "cpp",
            "map": "cpp",
            "unordered_map": "cpp",
            "vector": "cpp",
            "exception": "cpp",
            "fstream": "cpp",
            "functional": "cpp",
            "initializer_list": "cpp",
            "iosfwd": "cpp",
            "istream": "cpp",
            "limits": "cpp",
            "memory": "cpp",
            "new": "cpp",
            "numbers": "cpp",
            "numeric": "cpp",
            "ostream": "cpp",
            "ratio": "cpp",
            "regex": "cpp",
            "semaphore": "cpp",
            "sstream": "cpp",
            "stdexcept": "cpp",
            "stop_token": "cpp",
            "streambuf": "cpp",
            "string": "cpp",
            "string_view": "cpp",
            "system_error": "cpp",
            "thread": "cpp",
            "type_traits": "cpp",
            "tuple": "cpp",
            "typeinfo": "cpp",
            "utility": "cpp",
            "csignal": "cpp",
            "cstdarg": "cpp",
            "cstddef": "cpp",
            "any": "cpp",
            "strstream": "cpp",
            "charconv": "cpp",
            "cinttypes": "cpp",
            "codecvt": "cpp",
            "complex": "cpp",
            "condition_variable": "cpp",
            "coroutine": "cpp",
            "list": "cpp",
            "set": "cpp",
            "algorithm": "cpp",
            "iterator": "cpp",
            "memory_resource": "cpp",
            "optional": "cpp",
            "random": "cpp",
            "source_location": "cpp",
            "future": "cpp",
            "iomanip": "cpp",
            "iostream": "cpp",
            "mutex": "cpp",
            "span": "cpp",
            "cfenv": "cpp",
            "typeindex": "cpp",
            "variant": "cpp",
            "unordered_set": "cpp"
        }
    }
}