
feat: cuda implementation for ggml_conv_transpose_1d #854

Open · wants to merge 11 commits into master
src/ggml-cuda.cu (4 changes: 4 additions & 0 deletions)

@@ -29,6 +29,7 @@
#include "ggml-cuda/tsembd.cuh"
#include "ggml-cuda/unary.cuh"
#include "ggml-cuda/upscale.cuh"
#include "ggml-cuda/conv-transpose-1d.cuh"

#include <algorithm>
#include <array>
@@ -2338,6 +2339,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct ggml_tensor * dst)
case GGML_OP_IM2COL:
ggml_cuda_op_im2col(ctx, dst);
break;
case GGML_OP_CONV_TRANSPOSE_1D:
ggml_cuda_op_conv_transpose_1d(ctx, dst);
break;
case GGML_OP_POOL_2D:
ggml_cuda_op_pool2d(ctx, dst);
break;
src/ggml-cuda/conv-transpose-1d.cu (91 changes: 91 additions & 0 deletions)

@@ -0,0 +1,91 @@
#include "conv-transpose-1d.cuh"

static __global__ void conv_transpose_1d_kernel(
const int s0, const int p0, const int d0, const int output_size,
const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3,
const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3,
const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3,
const float * src0, const float * src1, float * dst) {
int global_index = threadIdx.x + blockIdx.x * blockDim.x;
if (global_index >= output_size) {
return;
}

int out_index = global_index / dst_ne0; // output row (channel) this thread writes

float accumulator = 0.0f;

Review comment:

int -> float

Contributor Author:

Yeah, that seems to fix the issue I was experiencing; it's bizarre that the tests still passed even in CUDA mode with the types accidentally set to int. Thanks so much!


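// Aside (not from the PR): a plausible reason the int version still passed the
// tests is that float -> int truncation is lossless for small whole numbers,
// so with integer-valued test data the int products and sums match the float
// ones exactly, e.g. for w = 2.0f, x = 3.0f:
//   int wi = w, xi = x;        // truncating conversions, lossless here
//   wi * xi == (int)(w * x)    // 6 == 6, so the bug is invisible to the test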
for (int c = 0; c < src0_ne2; c++)
{

int idx = global_index % dst_ne0;

int kernel_offset = (src0_ne0 * src0_ne1 * c) + (out_index * src0_ne0);
int input_offset = src1_ne0 * c;

int initial_weight_idx = idx > src0_ne0 - 1 ? src0_ne0 - 1 : idx; // currently unused

for (int i = 0; i < src1_ne0; i++)
{
if (!(idx >= i*s0 && idx < i*s0 + src0_ne0))
{
continue;
}
int weight_idx = idx - i*s0;

float kernel_weight = src0[kernel_offset + weight_idx];
float input_value = src1[input_offset + i];

Review comment:

int -> float

accumulator += kernel_weight * input_value;
}
}
dst[global_index] = accumulator;
}

static void conv_transpose_1d_f32_f32_cuda(
const int s0, const int p0, const int d0, const int output_size,
const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3,
const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3,
const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3,
const float * src0, const float * src1, float * dst,
cudaStream_t stream) {

const int num_blocks = (output_size + CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE - 1) / CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE;
conv_transpose_1d_kernel<<<num_blocks, CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE, 0, stream>>>(s0, p0, d0, output_size,
    src0_ne0, src0_ne1, src0_ne2, src0_ne3,
    src1_ne0, src1_ne1, src1_ne2, src1_ne3,
    dst_ne0, dst_ne1, dst_ne2, dst_ne3,
    src0, src1, dst);
}

void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];
const float * src0_d = (const float *)src0->data;

const ggml_tensor * src1 = dst->src[1];
const float * src1_d = (const float *)src1->data;

float * dst_d = (float *)dst->data;
cudaStream_t stream = ctx.stream();

GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT( dst->type == GGML_TYPE_F32);
Review comment (Owner):

Also assert contiguous src0 and src1

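// Sketch of the requested change (not yet in this diff): ggml already provides
// a contiguity helper, so the checks could read:
//   GGML_ASSERT(ggml_is_contiguous(src0));
//   GGML_ASSERT(ggml_is_contiguous(src1));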
const int32_t * opts = (const int32_t *)dst->op_params;

const int s0 = opts[0];
const int p0 = 0; // opts[3]; the op requires p0 == 0
const int d0 = 1; // opts[4]; the op requires d0 == 1

const int64_t kernel_size = ggml_nelements(src0);
const int64_t input_size = ggml_nelements(src1);
const int64_t output_size = ggml_nelements(dst);

conv_transpose_1d_f32_f32_cuda(s0, p0, d0, output_size,
    src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
    src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
    dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
    src0_d, src1_d, dst_d, stream);
}
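For context, a minimal usage sketch of the op this file implements (not part of the diff; the shapes and values are illustrative, and the CUDA kernel is exercised only when ggml is built with its CUDA backend):

#include "ggml.h"

int main(void) {
    struct ggml_init_params params = { /*.mem_size =*/ 16*1024*1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false };
    struct ggml_context * ctx = ggml_init(params);

    // kernel has shape [K, Cout, Cin] = [3, 2, 2]; input has shape [L, Cin] = [8, 2]
    struct ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 3, 2, 2);
    struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 2);
    ggml_set_f32(k, 1.0f);
    ggml_set_f32(x, 1.0f);

    // s0 is the stride; the op requires p0 == 0 and d0 == 1
    struct ggml_tensor * y = ggml_conv_transpose_1d(ctx, k, x, /*s0*/ 2, /*p0*/ 0, /*d0*/ 1);
    // output shape: [(L - 1)*s0 + K, Cout] = [7*2 + 3, 2] = [17, 2]

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, y);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads*/ 1);

    ggml_free(ctx);
    return 0;
}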
src/ggml-cuda/conv-transpose-1d.cuh (5 changes: 5 additions & 0 deletions)

@@ -0,0 +1,5 @@
#include "common.cuh"

#define CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE 256

void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
tests/CMakeLists.txt (8 changes: 8 additions & 0 deletions)

@@ -337,6 +337,14 @@ add_executable(${TEST_TARGET} ${TEST_TARGET}.c)
target_link_libraries(${TEST_TARGET} PRIVATE ggml)
add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)

# test-conv-transpose-1d

set(TEST_TARGET test-conv-transpose-1d)
add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)
target_link_libraries(${TEST_TARGET} PRIVATE ggml)
add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}>)
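# Not part of the diff: after building with the CUDA backend enabled, this
# test can be run on its own with
#   ctest -R test-conv-transpose-1d --output-on-failure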


#
# test-dup
