Merge pull request #55 from ravenscroftj/feature/gpu_layers

WIP: Integrate more direct GPU support

Commit 2b27760a7f
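
This PR threads optional GPU offloading through TurboPilot: a new --ngl / --n-gpu-layers CLI flag feeding a ModelConfig::n_gpu_layers field, per-layer offload loops in the GPT-J, GPT-NeoX and StarCoder loaders (compiled in under GGML_USE_CLBLAST or GGML_USE_CUBLAS), matching GGML_CLBLAST / GGML_CUBLAS CMake options, a CUDA runtime Docker variant, and a GPU_LAYERS environment variable in run.sh. The per-file changes follow.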
.github/workflows/docker-image.yml (vendored): 16 lines changed
@@ -53,14 +53,14 @@ jobs:
           runtime_base: nvidia/cuda:12.2.0-runtime-ubuntu22.04
           cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
 
-        - tag: -clblast
-          dockerfile: ./Dockerfile.default
-          platforms: linux/amd64
-          build_base: ubuntu:22.04
-          runtime_base: ubuntu:22.04
-          runtime_deps: libclblast1
-          extra_deps: libclblast-dev
-          cmake_args: -DGGML_CLBLAST=On
+        # - tag: -clblast
+        #   dockerfile: ./Dockerfile.default
+        #   platforms: linux/amd64
+        #   build_base: ubuntu:22.04
+        #   runtime_base: ubuntu:22.04
+        #   runtime_deps: libclblast1
+        #   extra_deps: libclblast-dev
+        #   cmake_args: -DGGML_CLBLAST=On
 
     steps:
.gitignore (vendored, new file): 2 lines
@@ -0,0 +1,2 @@
build/
models/
.gitmodules (vendored): 5 lines changed
@@ -1,9 +1,12 @@
 [submodule "ggml"]
 	path = extern/ggml
-	url = git@github.com:ravenscroftj/ggml.git
+	url = git@github.com:ggerganov/ggml.git
 [submodule "extern/argparse"]
 	path = extern/argparse
 	url = https://github.com/p-ranav/argparse.git
 [submodule "extern/sbdlog"]
 	path = extern/spdlog
 	url = https://github.com/gabime/spdlog.git
+[submodule "extern/ggml"]
+	path = extern/ggml
+	url = https://github.com/ggerganov/ggml
.vscode/c_cpp_properties.json (vendored, new file): 20 lines
@@ -0,0 +1,20 @@
{
    "configurations": [
        {
            "name": "Linux",
            "includePath": [
                "${workspaceFolder}/**",
                "${workspaceFolder}/extern/crow/include",
                "${workspaceFolder}/include",
                "${workspaceFolder}/include"
            ],
            "defines": [],
            "compilerPath": "/usr/bin/gcc",
            "cStandard": "c17",
            "cppStandard": "gnu++17",
            "intelliSenseMode": "linux-gcc-x64",
            "configurationProvider": "ms-vscode.cmake-tools"
        }
    ],
    "version": 4
}
.vscode/launch.json (vendored, new file): 70 lines
@@ -0,0 +1,70 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "(gdb) Launch TBP",
            "type": "cppdbg",
            "request": "launch",
            "program": "/home/james/workspace/rafael-llm/turbopilot/build/bin/turbopilot",
            "args": [
                //TBP ARGS
                "-v",
                "-f",
                "/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
                "-m",
                "replit",
            ],
            "stopAtEntry": false,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": false,
            "MIMode": "gdb",
            "setupCommands": [
                {
                    "description": "Enable pretty-printing for gdb",
                    "text": "-enable-pretty-printing",
                    "ignoreFailures": true
                },
                {
                    "description": "Set Disassembly Flavor to Intel",
                    "text": "-gdb-set disassembly-flavor intel",
                    "ignoreFailures": true
                }
            ]
        },
        {
            "name": "(gdb) Launch Replut",
            "type": "cppdbg",
            "request": "launch",
            "program": "/home/james/workspace/rafael-llm/turbopilot/extern/ggml/build/bin/replit",
            "args": [
                // REPLIT ARGS
                "-m",
                "/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
                "-f",
                "/home/james/workspace/rafael-llm/turbopilot/test.txt"
            ],
            "stopAtEntry": false,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": false,
            "MIMode": "gdb",
            "setupCommands": [
                {
                    "description": "Enable pretty-printing for gdb",
                    "text": "-enable-pretty-printing",
                    "ignoreFailures": true
                },
                {
                    "description": "Set Disassembly Flavor to Intel",
                    "text": "-gdb-set disassembly-flavor intel",
                    "ignoreFailures": true
                }
            ]
        },

    ]
}
.vscode/tasks.json (vendored, new file): 28 lines
@@ -0,0 +1,28 @@
{
    "tasks": [
        {
            "type": "cppbuild",
            "label": "C/C++: g++ build active file",
            "command": "/usr/bin/g++",
            "args": [
                "-fdiagnostics-color=always",
                "-g",
                "${file}",
                "-o",
                "${fileDirname}/${fileBasenameNoExtension}"
            ],
            "options": {
                "cwd": "${fileDirname}"
            },
            "problemMatcher": [
                "$gcc"
            ],
            "group": {
                "kind": "build",
                "isDefault": true
            },
            "detail": "Task generated by Debugger."
        }
    ],
    "version": "2.0.0"
}
CMakeLists.txt

@@ -15,6 +15,11 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS "on")
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")
 
+option(GGML_CLBLAST "ggml: use clBLAST" OFF)
+option(GGML_CUBLAS "ggml: use cuBLAS" OFF)
+
+
+
 if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
     message(STATUS "ARM detected")
     if (MSVC)

@@ -48,12 +53,20 @@ if (GGML_STATIC)
     SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
     SET(BUILD_SHARED_LIBS OFF)
     SET(CMAKE_EXE_LINKER_FLAGS "-static")
 
     # if(GGML_OPENBLAS)
     #     set(BLA_STATIC ON)
     # endif()
 endif()
 
+if (GGML_CUBLAS)
+    cmake_minimum_required(VERSION 3.17)
+
+    find_package(CUDAToolkit)
+    if (CUDAToolkit_FOUND)
+        add_compile_definitions(GGML_USE_CUBLAS)
+    else()
+        message(WARNING "cuBLAS not found")
+    endif()
+endif()
+
 add_subdirectory(src)
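
Note: the new GGML_CUBLAS option matches the -DGGML_CUBLAS=ON cmake_args used in the Docker build matrix above. Enabling it raises the CMake requirement to 3.17 (needed for find_package(CUDAToolkit)) and defines GGML_USE_CUBLAS, which the source files below test with #ifdef.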
extern/ggml (vendored): 2 lines changed
@@ -1 +1 @@
-Subproject commit f6365c0605ac86c6ab106cda0e8d6650e54097a7
+Subproject commit 1a5d5f331de1d3c7ace40d86fe2373021a42f9ce
include/turbopilot/model.h

@@ -44,6 +44,7 @@ struct ModelConfig
     int32_t seed = -1; // RNG seed
     int32_t n_ctx = 512; // context size
     int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_gpu_layers = 0;
 };
 
 class TurbopilotModel

@@ -67,4 +68,5 @@ protected:
     std::mutex model_lock;
 };
 
-#endif //__TURBOPILOT_MODEL_H
+#endif //__TURBOPILOT_MODEL_H
+
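
Because n_gpu_layers is plain data on ModelConfig and defaults to 0, offloading is strictly opt-in. A minimal caller-side sketch (hypothetical values; assumes the surrounding TurbopilotModel API from this header):

    ModelConfig config;
    config.n_gpu_layers = 20; // ask for up to 20 layers on the GPU; each loader
                              // clamps this with std::min against hparams.n_layer
    // model->load_model(fname) then runs the offload loop guarded by
    // #if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)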
run.sh: 7 lines changed
@@ -1,3 +1,6 @@
 #!/bin/sh
-
-/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
+if [ -z "$GPU_LAYERS" ]; then
+    /app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
+else
+    /app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL --ngl $GPU_LAYERS
+fi
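
With this change the container entrypoint only appends --ngl when GPU_LAYERS is set in the environment, so existing CPU-only deployments keep their previous command line unchanged.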
src/gptj.cpp: 52 lines changed
@@ -6,6 +6,14 @@
 #include <iostream>
 #include <fstream>
 
+#ifdef GGML_USE_CLBLAST
+#include "ggml-opencl.h"
+#endif
+#ifdef GGML_USE_CUBLAS
+#include "ggml-cuda.h"
+#endif
+
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif

@@ -455,6 +463,9 @@ bool GPTJModel::load_model(std::string fname) {
         }
     }
 
+
+
+
     // key + value memory
     {
         const auto & hparams = model->hparams;

@@ -553,6 +564,47 @@ bool GPTJModel::load_model(std::string fname) {
 
     fin.close();
 
+
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
+    if(config.n_gpu_layers > 0){
+        size_t vram_total = 0;
+        int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
+        spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
+
+        for(int i=0; i < gpu_layers; i++) {
+            const auto & layer = model->layers[i];
+            layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_attn_k_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_attn_v_proj_w->backend = GGML_BACKEND_GPU;
+
+            layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
+
+#if defined(GGML_USE_CLBLAST)
+            ggml_cl_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
+            ggml_cl_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
+            ggml_cl_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
+            ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#else
+            ggml_cuda_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
+            ggml_cuda_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
+            ggml_cuda_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
+            ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#endif
+        }
+
+        spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
+    }
+
+#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
     return true;
 }
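
The offload loop above repeats the same three steps for every weight tensor it moves. A condensed sketch of that pattern (this helper does not exist in the PR; it just factors out the diff's repeated statements, shown here for the cuBLAS path):

    #ifdef GGML_USE_CUBLAS
    // Mark a tensor GPU-resident, upload it, and report its size for the VRAM tally.
    static size_t offload_tensor(struct ggml_tensor * t) {
        t->backend = GGML_BACKEND_GPU;          // route this tensor's ops to the GPU backend
        ggml_cuda_transform_tensor(t->data, t); // copy/convert the weights into VRAM
        return ggml_nbytes(t);                  // bytes moved, accumulated into vram_total
    }
    #endif

Under GGML_USE_CLBLAST the same shape applies with ggml_cl_transform_tensor. The GPT-NeoX and StarCoder loaders below use the identical pattern over their fused attention and MLP weights.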
src/gptneox.cpp

@@ -3,6 +3,13 @@
 
 #include <ggml/ggml.h>
 
+#ifdef GGML_USE_CLBLAST
+#include "ggml-opencl.h"
+#endif
+#ifdef GGML_USE_CUBLAS
+#include "ggml-cuda.h"
+#endif
+
 #include <cinttypes>
 
 #include <iostream>

@@ -50,6 +57,7 @@ ggml_tensor * gpt_neox_ff(
 }
 
 
+
 // evaluate the transformer
 //
 // - model: the model

@@ -612,9 +620,42 @@ bool GPTNEOXModel::load_model(std::string fname) {
 
         printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
     }
 
     fin.close();
 
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
+
+    if(config.n_gpu_layers > 0){
+        size_t vram_total = 0;
+        int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
+        spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
+
+
+        for(int i=0; i < gpu_layers; i++) {
+            const auto & layer = model->layers[i];
+            layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
+            layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
+
+#if defined(GGML_USE_CLBLAST)
+            ggml_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
+            ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#else
+            ggml_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
+            ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#endif
+        }
+
+        spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
+    }
+
+#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
     return true;
 }
src/main.cpp: 12 lines changed
@@ -40,6 +40,13 @@ int main(int argc, char **argv)
         .default_value(4)
         .scan<'i', int>();
 
+
+    program.add_argument("--ngl", "--n-gpu-layers")
+        .help("The number of layers to offload to GPU")
+        .default_value(0)
+        .scan<'i', int>();
+
+
     program.add_argument("-p", "--port")
         .help("The tcp port that turbopilot should listen on")
         .default_value(18080)

@@ -102,6 +109,7 @@ int main(int argc, char **argv)
     config.temp = program.get<float>("--temperature");
     config.top_p = program.get<float>("--top-p");
     config.n_batch = program.get<int>("--batch-size");
+    config.n_gpu_layers = program.get<int>("--ngl");
 
     if(model_type.compare("codegen") == 0) {
         spdlog::info("Initializing GPT-J type model for '{}' model", model_type);

@@ -183,4 +191,6 @@ int main(int argc, char **argv)
 
         free(model);
     }
 }
+
+
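
Because --ngl defaults to 0 and every loader guards its offload block with if(config.n_gpu_layers > 0), binaries built with CUBLAS or CLBLAST support still behave exactly as before unless the flag (or GPU_LAYERS in Docker) is set.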
src/starcoder.cpp

@@ -5,6 +5,12 @@
 #include <ggml/ggml.h>
 #include <spdlog/spdlog.h>
 
+#ifdef GGML_USE_CLBLAST
+#include "ggml-opencl.h"
+#endif
+#ifdef GGML_USE_CUBLAS
+#include "ggml-cuda.h"
+#endif
 
 // evaluate the transformer
 //

@@ -36,10 +42,10 @@ bool starcoder_eval(
 
     // use 2 scratch buffers
     // TODO: very hacky solution - reimplement in a more elegant way
-    static size_t scr0_size = 256u*1024*1024;
+    static size_t scr0_size = 512u*1024*1024;
     static void * scr0 = malloc(scr0_size);
 
-    static size_t scr1_size = 256u*1024*1024;
+    static size_t scr1_size = 512u*1024*1024;
     static void * scr1 = malloc(scr1_size);
 
     if (mem_per_token > 0 && mem_per_token*N > buf_size) {

@@ -677,6 +683,41 @@ bool StarcoderModel::load_model(std::string fname) {
 
     fin.close();
 
+
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
+    if(config.n_gpu_layers > 0){
+        size_t vram_total = 0;
+        int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
+        spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);
+
+
+        for(int i=0; i < gpu_layers; i++) {
+            const auto & layer = model->layers[i];
+            layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
+            layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
+            layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;
+
+#if defined(GGML_USE_CLBLAST)
+            ggml_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
+            ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#else
+            ggml_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
+            ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
+            ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
+#endif
+        }
+
+        spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
+    }
+
+#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+
 
     return true;
 }
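
Independent of the GPU path, starcoder_eval's two static scratch buffers grow from 256 MB to 512 MB each; presumably this gives larger compute graphs more headroom, at the cost of a bigger fixed allocation.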
test_codegen2.py (new file): 65 lines
@@ -0,0 +1,65 @@
#%%
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen2-1B")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen2-1B", trust_remote_code=True, revision="main")


#%%
model = model.to(device="cuda")

#%%
text = """
import os

def post_to_pastebin"""
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=512)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))

# %%

def format_model_input(prefix, suffix):
    return prefix + "<mask_1>" + suffix + "<|endoftext|>" + "<sep>" + "<mask_1>"


prefix = """
import os

def post_to_pastebin"""
suffix = "result = post_to_pastebin(content)"
text = format_model_input(prefix, suffix)
input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=False))
# %%
def main():
    text = """

print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))

if __name__ == '__main__':
    main()

print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
# %%

import os

def post_to_pastebin"""
    input_ids = tokenizer(text, return_tensors="pt").to("cuda").input_ids
    generated_ids = model.generate(input_ids, max_length=512)


    print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))

# %%

def post_to_pastebin(content):
    input_ids = tokenizer(content, return_tensors="pt").to("cuda").input_ids
    generated_ids = model.generate(input_ids, max_length=512)
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
test_santa.py (new file): 45 lines
@@ -0,0 +1,45 @@
#%%
import torch
from transformers import CodeGenTokenizer, GPTJForCausalLM


checkpoint = "/home/james/workspace/rafael-llm/codegen-2B-multi-gptj"
device = "cuda" # for GPU usage or "cpu" for CPU usage

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
model = GPTJForCausalLM.from_pretrained(checkpoint).to(device)


#model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)
#%%

# define the user model
class User:


# %%
code = """import os
import requests

#send the json data to pastebin
def send_data"""
inputs = tokenizer.encode(code, return_tensors="pt").to(device)
outputs = model.generate(inputs, max_length=200)
response = tokenizer.decode(outputs[0])

print(response)

import requests

#send the json data to pastebin
def send_data(data):
    url = "http://pastebin.com/api_post.php"
    data = {"api_dev_key": "<api_key>", "api_user_key": "<user_key>", "api_content": data}
    response = requests.post(url, data=data).text
    return response



# %%
code
# %%
turbopilot.code-workspace (new file): 94 lines
@@ -0,0 +1,94 @@
{
    "folders": [
        {
            "path": "."
        },
        {
            "path": "extern/ggml"
        },
        {
            "path": "../../pymicrocosm"
        }
    ],
    "settings": {
        "files.associations": {
            "array": "cpp",
            "atomic": "cpp",
            "bit": "cpp",
            "*.tcc": "cpp",
            "bitset": "cpp",
            "cctype": "cpp",
            "chrono": "cpp",
            "clocale": "cpp",
            "cmath": "cpp",
            "compare": "cpp",
            "concepts": "cpp",
            "cstdint": "cpp",
            "cstdio": "cpp",
            "cstdlib": "cpp",
            "cstring": "cpp",
            "ctime": "cpp",
            "cwchar": "cpp",
            "cwctype": "cpp",
            "deque": "cpp",
            "map": "cpp",
            "unordered_map": "cpp",
            "vector": "cpp",
            "exception": "cpp",
            "fstream": "cpp",
            "functional": "cpp",
            "initializer_list": "cpp",
            "iosfwd": "cpp",
            "istream": "cpp",
            "limits": "cpp",
            "memory": "cpp",
            "new": "cpp",
            "numbers": "cpp",
            "numeric": "cpp",
            "ostream": "cpp",
            "ratio": "cpp",
            "regex": "cpp",
            "semaphore": "cpp",
            "sstream": "cpp",
            "stdexcept": "cpp",
            "stop_token": "cpp",
            "streambuf": "cpp",
            "string": "cpp",
            "string_view": "cpp",
            "system_error": "cpp",
            "thread": "cpp",
            "type_traits": "cpp",
            "tuple": "cpp",
            "typeinfo": "cpp",
            "utility": "cpp",
            "csignal": "cpp",
            "cstdarg": "cpp",
            "cstddef": "cpp",
            "any": "cpp",
            "strstream": "cpp",
            "charconv": "cpp",
            "cinttypes": "cpp",
            "codecvt": "cpp",
            "complex": "cpp",
            "condition_variable": "cpp",
            "coroutine": "cpp",
            "list": "cpp",
            "set": "cpp",
            "algorithm": "cpp",
            "iterator": "cpp",
            "memory_resource": "cpp",
            "optional": "cpp",
            "random": "cpp",
            "source_location": "cpp",
            "future": "cpp",
            "iomanip": "cpp",
            "iostream": "cpp",
            "mutex": "cpp",
            "span": "cpp",
            "cfenv": "cpp",
            "typeindex": "cpp",
            "variant": "cpp",
            "unordered_set": "cpp"
        }
    }
}