Skip to content

Commit

Permalink
fix f16 mmv, 49 -> 41 failures
Browse files — browse the repository at this point in the history
  • Loading branch information
cebtenzzre committed Jan 24, 2024
1 parent 1a14099 commit 2b0f642
Show file tree
Hide file tree
Showing 2 changed files with 63 additions and 32 deletions.
48 changes: 30 additions & 18 deletions ggml-kompute.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -876,39 +876,50 @@ static void ggml_vk_diag_mask_inf(kp::Sequence& seq,
seq.record<kp::OpAlgoDispatch>(s_algo);
}

static void ggml_vk_mul_mat_f16(kp::Sequence& seq,
const std::shared_ptr<kp::Tensor>& inA,
const std::shared_ptr<kp::Tensor>& inB,
const std::shared_ptr<kp::Tensor>& out,
uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
int32_t ne00, int32_t ne01, int32_t ne02,
uint32_t nb01, uint32_t nb02,
int32_t ne11, int32_t ne12,
uint32_t nb11, uint32_t nb12,
int32_t ne0, int32_t ne1) {
static void ggml_vk_mul_mat_f16(
kp::Sequence& seq,
const std::shared_ptr<kp::Tensor>& inA,
const std::shared_ptr<kp::Tensor>& inB,
const std::shared_ptr<kp::Tensor>& out,
uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
int32_t ne00, int32_t ne01, int32_t ne02,
uint32_t nb00, uint32_t nb01, uint32_t nb02,
int32_t ne10, int32_t ne11, int32_t ne12, int32_t ne13,
uint32_t nb10, uint32_t nb11, uint32_t nb12,
int32_t ne0, int32_t ne1,
uint32_t r2, uint32_t r3
) {
const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_f16_comp_spv,
kp::shader_data::op_mul_mat_f16_comp_spv_len);

struct PushConstants {
uint32_t inAOff, inBOff, outOff;
int32_t ne00;
uint32_t nb01, nb02;
uint32_t nb11, nb12;
int32_t ne02, ne12;
int32_t ne00, ne01, ne02;
uint32_t nb00, nb01, nb02;
int32_t ne10, ne11, ne12;
uint32_t nb10, nb11, nb12;
int32_t ne0, ne1;
uint32_t r2, r3;
} pushConsts {
safe_divide(inAOff, 2), safe_divide(inBOff, 4), safe_divide(outOff, 4),
ne00, nb01, nb02, nb11, nb12, ne02, ne12, ne0, ne1,
ne00, ne01, ne02,
nb00, nb01, nb02,
ne10, ne11, ne12,
nb10, nb11, nb12,
ne0, ne1,
r2, r3
};

const unsigned ny = unsigned((ne11 + 4 - 1)/4);

std::shared_ptr<kp::Algorithm> s_algo = nullptr;
if (!komputeManager()->hasAlgorithm(__func__)) {
const uint32_t local_x = ggml_vk_current_device().subgroupSize * 2;
s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned(ne01), unsigned(ne11), unsigned(std::max(ne12, ne02))}, {local_x}, {pushConsts});
s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned(ne01), ny, unsigned(ne12*ne13)}, {local_x}, {pushConsts});
} else {
s_algo = komputeManager()->getAlgorithm(__func__);
s_algo->setTensors({inA, inB, out});
s_algo->setWorkgroup({unsigned(ne01), unsigned(ne11), unsigned(std::max(ne12, ne02))});
s_algo->setWorkgroup({unsigned(ne01), ny, unsigned(ne12*ne13)});
s_algo->setPushConstants<PushConstants>({pushConsts});
s_algo->updateDescriptors(s_kompute_context->pool.get());
}
Expand Down Expand Up @@ -1590,7 +1601,8 @@ void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph
case GGML_TYPE_F16:
ggml_vk_mul_mat_f16(
seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst,
ne00, ne01, ne02, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1
ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, ne13, nb10, nb11, nb12,
ne0, ne1, r2, r3
);
break;
case GGML_TYPE_Q8_0:
Expand Down
47 changes: 33 additions & 14 deletions kompute-shaders/op_mul_mat_f16.comp
Original file line number Diff line number Diff line change
Expand Up @@ -15,34 +15,53 @@ layout (push_constant) uniform parameter {
uint inBOff;
uint outOff;
int ne00;
int ne01;
int ne02;
uint nb00;
uint nb01;
uint nb02;
int ne10;
int ne11;
int ne12;
uint nb10;
uint nb11;
uint nb12;
int ne0;
int ne1;
// NOTE(review): r2/r3 look like batch broadcast ratios used to map inB batch
// indices down to inA's batch dims (see their use in main) — confirm against
// the host-side dispatch code.
uint r2;
uint r3;
} pcs;

#define N_F16_F32 4

// One f16 (inA) x f32 (inB) mat-vec product.
// Workgroup mapping: x = output row r0 of inA, y = a batch of up to
// N_F16_F32 consecutive rows of inB, z = flattened batch index im.
void main() {
    const uint r0 = gl_WorkGroupID.x;
    const uint rb = gl_WorkGroupID.y*N_F16_F32; // first inB row handled by this workgroup
    const uint im = gl_WorkGroupID.z;

    // Split the flat z index into inB's two batch dimensions.
    const uint i12 = im%pcs.ne12;
    const uint i13 = im/pcs.ne12;

    // Byte offset of row r0 in inA; i12/r2 and i13/r3 collapse inB's batch
    // indices onto inA's (presumably broadcast) batch dims — TODO confirm
    // r2/r3 semantics against the host code.
    const uint offset0 = r0*pcs.nb01 + (i12/pcs.r2)*pcs.nb02 + (i13/pcs.r3)*pcs.nb02*pcs.ne02;

    const uint x = offset0 / 2 + pcs.inAOff; // element index into inA (2 bytes per fp16)

    for (uint row = 0; row < N_F16_F32; ++row) {
        uint r1 = rb + row;
        if (r1 >= pcs.ne11) {
            break; // partial batch of rows at the tail
        }

        const uint y = (r1*pcs.nb11 + im*pcs.nb12) / 4 + pcs.inBOff; // element index into inB (4 bytes per fp32)

        // Subgroup-strided partial dot product over the ne00-long row.
        float sumf = 0;
        for (uint i = gl_SubgroupInvocationID.x; i < pcs.ne00; i += gl_SubgroupSize) {
            sumf += float(inA[x+i]) * float(inB[y+i]);
        }

        // Reduce across the subgroup; one invocation writes the result.
        const float all_sum = subgroupAdd(sumf);
        if (subgroupElect()) {
            out_[im*pcs.ne1*pcs.ne0 + r1*pcs.ne0 + r0 + pcs.outOff] = all_sum;
        }
    }
}

0 comments on commit 2b0f642

Please sign in to comment.