#include "common.cuh"
#include "argmax.cuh"
#include "sum.cuh"

#include <cfloat> // FLT_MAX
#include <cstdint>

static __global__ void argmax_f32(
    const float * x, int32_t * dst, const int64_t ncols, const int64_t nrows) {
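    // One warp per block. Each block processes the tile of rows
    // [row0, row0 + WARP_SIZE): the whole warp cooperates on one row at a time,
    // and the lane whose index equals the row's offset within the tile keeps
    // that row's result, so the final store to dst is a single coalesced write.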

    int argmax_thread = 0; // result for the row this lane stores at the end
    const int64_t row0 = (int64_t)blockIdx.x*WARP_SIZE; // first row of this block's tile

#pragma unroll
    for (int64_t row1 = 0; row1 < WARP_SIZE; ++row1) {
        const int64_t row = row0 + row1;

        if (row >= nrows) {
            break;
        }

        float maxval = -FLT_MAX; // running maximum over this lane's column slice
        int   argmax = -1;       // column index of that maximum

        // Strided scan: lane t visits columns t, t + WARP_SIZE, t + 2*WARP_SIZE, ...
        for (int32_t col = threadIdx.x; col < ncols; col += WARP_SIZE) {
            const float val        = x[row*ncols + col];
            const int   bigger     = val > maxval;
            const int   not_bigger = bigger ^ 0x00000001;

            // Branchless equivalent of: if (val > maxval) { maxval = val; argmax = col; }
            maxval = maxval*not_bigger + val*bigger;
            argmax = argmax*not_bigger + col*bigger;
        }

        // Butterfly reduction: after log2(WARP_SIZE) exchange steps every lane
        // holds the row-wide maximum and its column (16 = WARP_SIZE/2).
#pragma unroll
        for (int mask = 16; mask > 0; mask >>= 1) {
            const float val        = __shfl_xor_sync(0xFFFFFFFF, maxval, mask, WARP_SIZE);
            const int   col        = __shfl_xor_sync(0xFFFFFFFF, argmax, mask, WARP_SIZE);
            const int   bigger     = val > maxval;
            const int   not_bigger = bigger ^ 0x00000001;

            maxval = maxval*not_bigger + val*bigger;
            argmax = argmax*not_bigger + col*bigger;
        }

        // Only the lane whose index equals this row's offset in the tile keeps
        // the result; for all other lanes store is 0 and nothing is added.
        const int store = row1 == threadIdx.x;
        argmax_thread += store*argmax;
    }

    // Lane t writes the result for row row0 + t (coalesced across the warp).
    const int64_t row = row0 + threadIdx.x;

    if (row >= nrows) {
        return;
    }

    dst[row] = argmax_thread;
}

// Host entry point: validates types and contiguity, then launches argmax_f32.
void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_I32);

    GGML_ASSERT(ggml_is_contiguous(src0));

    const int64_t ne00  = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    const float * src0_d = (const float *) src0->data;
    int32_t     * dst_d  = (int32_t     *) dst->data;

    cudaStream_t stream = ctx.stream();

    // One block (a single warp) per tile of WARP_SIZE rows.
    const int64_t num_blocks = (nrows + WARP_SIZE - 1) / WARP_SIZE;

    const dim3 blocks_dim(WARP_SIZE, 1, 1);
    const dim3 blocks_num(num_blocks, 1, 1);

    argmax_f32<<<blocks_num, blocks_dim, 0, stream>>>(src0_d, dst_d, ne00, nrows);
}