Jing's contribution: prototype of mixed precision gemm FP16/BF16xint4 GEMM (#1762)

* add a prototype of int4
* clean
* debug
* clean
* clean
* move packed into dynamic_buffer
* fixed coord reset
* add fast pki4 to half conversion
* fix
* fixed reference and host_tensor
* fixed tensor init
* format
* debug i4_to_f16_convert
* format
* fixed splitk
* weight permute
* add b tile permute
* clean
* weight permute with splitki
* format
* improve weight layout
* add and_or_b32
* fixed splitk crush
* add permute switch as a template
* recover v3r1
* clean
* failure with intrawave v2
* fixed
* fixed
* add ckProfiler
* add bfp16 support
* add bf16 example
* fixed int4 to bhalf_t conversion
* format
* fixed int4 to bf16 conversion
* clean
* add instances for mem
* clean
* fixed host tensor size
* fixed
* debug
* fixed
* add pk_i4_t as a struct
* fix
* Update example/01_gemm/gemm_xdl_bf16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* Update example/01_gemm/gemm_xdl_bf16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* Update example/01_gemm/gemm_xdl_bf16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* revert
* Update example/01_gemm/gemm_xdl_bf16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* Update example/01_gemm/gemm_xdl_fp16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* Update example/01_gemm/gemm_xdl_fp16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* Update example/01_gemm/gemm_xdl_fp16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* Update example/01_gemm/gemm_xdl_fp16_pk_i4_v3.cpp (Co-authored-by: Adam Osewski <[email protected]>)
* fixed comments
* revert
* clean
* revert
* revert
* fixed
* Update CMakeLists.txt
* Update script/cmake-ck-dev.sh (Co-authored-by: Adam Osewski <[email protected]>)
* Update include/ck/tensor_operation/gpu/element/unary_element_wise_operation.hpp (Co-authored-by: Adam Osewski <[email protected]>)
* Update CMakeLists.txt (Co-authored-by: Adam Osewski <[email protected]>)
* fixed
* fixed
* fixed
* revert
* revert
* add comments
* format
* fixed assert
* fixed
* Fix I4 define in ckProfiler
* Fixed example_gemm_xdl_bf16_pk_i4_v3 test failed issue

---------

Co-authored-by: Jing Zhang <[email protected]>
Co-authored-by: zjing14 <[email protected]>
Co-authored-by: mtgu0705 <[email protected]>
commit 1d8e4ec, 1 parent 159fa31. Showing 37 changed files with 1,583 additions and 350 deletions.
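One of the new files, the BF16 x packed-int4 example, is shown below. For context on the weight type it uses: ck::pk_i4_t stores two signed 4-bit values per byte, which is what the commit log's "add pk_i4_t as a struct" and "add fast pki4 to half conversion" entries refer to. The sketch below is not CK's implementation; the struct name, nibble order, and helper functions are illustrative assumptions that only demonstrate the packing idea.

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a packed-int4 pair. CK's real pk_i4_t lives in
// the library headers and need not match this name, layout, or nibble order.
struct packed_i4_pair
{
    uint8_t data; // assumed: low nibble = element 0, high nibble = element 1
};

// Sign-extend a 4-bit value (0..15) into the signed range [-8, 7].
inline int sext4(int v) { return v >= 8 ? v - 16 : v; }

inline int unpack_lo(packed_i4_pair p) { return sext4(p.data & 0xF); }
inline int unpack_hi(packed_i4_pair p) { return sext4(p.data >> 4); }

int main()
{
    packed_i4_pair p{0xE3}; // high nibble 0xE (-2), low nibble 0x3 (+3)
    std::printf("lo=%d hi=%d\n", unpack_lo(p), unpack_hi(p)); // lo=3 hi=-2
}
```

Packing two weights per byte halves B's memory traffic, which is why the example's num_btype calculation below divides B's byte count by two when BDataType is pk_i4_t.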
@@ -0,0 +1,253 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#include "common.hpp"

#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v3.hpp"

using ADataType        = ck::bhalf_t;
using BDataType        = ck::pk_i4_t;
using AccDataType      = float;
using CShuffleDataType = ck::bhalf_t;
using CDataType        = ck::bhalf_t;

using ALayout = Row;
using BLayout = Col;
using CLayout = Row;

using AElementOp = PassThrough;
using BElementOp = PassThrough;
using CElementOp = PassThrough;

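// PermuteB = true selects the pre-shuffled weight path: run_gemm below
// rearranges B on the host into (K0, N, K1) blocks with K1 = KPerBlock
// before uploading, matching the layout this instance expects.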
static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
static constexpr bool PermuteA = false;
static constexpr bool PermuteB = true;
static constexpr ck::index_t KPerBlock = 128;

// clang-format off
using DeviceGemmV2Instance =
    ck::tensor_operation::device::DeviceGemm_Xdl_CShuffleV3<
        ALayout, BLayout, CLayout,
        ADataType, BDataType, CDataType, AccDataType, CShuffleDataType,
        AElementOp, BElementOp, CElementOp, GemmDefault,
        128,
        16, 64,
        KPerBlock, 8, 32,
        16, 16,
        1, 2,
        S<16, 8, 1>, S<1, 0, 2>, S<1, 0, 2>,
        2, 8, 8, 0,
        S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>,
        2, 32, 32, 0,
        1, 1, S<1, 16, 1, 8>, 4,
        ck::BlockGemmPipelineScheduler::Interwave, ck::BlockGemmPipelineVersion::v2, ADataType, ADataType, PermuteA, PermuteB>;
// clang-format on

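// Reading the numeric arguments above in DeviceGemm_Xdl_CShuffleV3's usual
// parameter order (an assumption, not spelled out in this diff): 128 threads
// per block computing a 16 x 64 (M x N) tile with KPerBlock = 128, AK1 = 8,
// BK1 = 32, on 16 x 16 XDL instructions, Interwave scheduling, pipeline v2.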
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
                                                                        BDataType,
                                                                        CDataType,
                                                                        AccDataType,
                                                                        PassThrough,
                                                                        PassThrough,
                                                                        PassThrough>;

template <typename ProblemType>
bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
{
    using namespace ck::literals;

    auto M       = problem_size.M;
    auto N       = problem_size.N;
    auto K       = problem_size.K;
    auto StrideA = problem_size.StrideA;
    auto StrideB = problem_size.StrideB;
    auto StrideC = problem_size.StrideC;
    auto KBatch  = problem_size.KBatch;

    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
            {
                return HostTensorDescriptor({row, col}, {stride, 1_uz});
            }
            else
            {
                return HostTensorDescriptor({row, col}, {1_uz, stride});
            }
        };

    auto f_get_default_stride =
        [](std::size_t row, std::size_t col, ck::index_t stride, auto layout) {
            if(stride == -1)
            {
                // if stride is -1, fall back to a packed default stride
                if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
                {
                    return static_cast<std::size_t>(col);
                }
                else
                {
                    return static_cast<std::size_t>(row);
                }
            }
            else
                return static_cast<std::size_t>(stride);
        };

    StrideA = f_get_default_stride(M, K, StrideA, ALayout{});
    StrideB = f_get_default_stride(K, N, StrideB, BLayout{});
    StrideC = f_get_default_stride(M, N, StrideC, CLayout{});

    Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
    Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
    Tensor<BDataType> b_k_n_permute(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));

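    // init_method: 0 = all ones; 1 = random integers in [-2, 2] for both
    // operands; 2/3 = ones on one side, random integers on the other;
    // anything else = random floats for A, random integers for B.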
    switch(config.init_method)
    {
    case 0:
        a_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
        b_k_n.GenerateTensorValue(GeneratorTensor_1<BDataType>{1});
        break;
    case 1:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
        break;
    case 2:
        a_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
        break;
    case 3:
        a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
        b_k_n.GenerateTensorValue(GeneratorTensor_1<BDataType>{1});
        break;
    default:
        a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0, 1.0});
        b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
    }

    Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
    Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));

    std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
    std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
    std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;

    DeviceMem a_m_k_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
    DeviceMem b_k_n_device_buf(sizeof(BDataType) * b_k_n_permute.mDesc.GetElementSpaceSize());
    DeviceMem c_m_n_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());

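    // With PermuteB enabled, rearrange B from column-major (N, K) storage into
    // (K0, N, K1) tiles, K1 = KPerBlock, so each K-block of packed int4
    // weights becomes contiguous per column; otherwise copy B through as-is.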
    // weight permute
    if constexpr(PermuteB)
    {
        int K1 = KPerBlock;
        int K0 = K / KPerBlock;

        // int K0, N, K1
        for(int j = 0; j < K0; j++)
        {
            for(int i = 0; i < N; i++)
            {
                for(int jj = 0; jj < K1; jj++)
                {
                    b_k_n_permute(j * N * K1 + i * K1 + jj) = b_k_n(i * K + (j * K1 + jj));
                }
            }
        }
    }
    else
    {
        for(int i = 0; i < N; i++)
        {
            for(int j = 0; j < K; j++)
            {
                b_k_n_permute(i * K + j) = b_k_n(i * K + j);
            }
        }
    }

    a_m_k_device_buf.ToDevice(a_m_k.mData.data());
    b_k_n_device_buf.ToDevice(b_k_n_permute.mData.data());
    DeviceMem workspace;

    auto a_element_op = AElementOp{};
    auto b_element_op = BElementOp{};
    auto c_element_op = CElementOp{};

    // do GEMM
    auto gemm      = DeviceGemmV2Instance{};
    auto invoker   = gemm.MakeInvoker();
    float ave_time = 0;

    auto argument = gemm.MakeArgument(static_cast<ADataType*>(a_m_k_device_buf.GetDeviceBuffer()),
                                      static_cast<BDataType*>(b_k_n_device_buf.GetDeviceBuffer()),
                                      static_cast<CDataType*>(c_m_n_device_buf.GetDeviceBuffer()),
                                      M,
                                      N,
                                      K,
                                      StrideA,
                                      StrideB,
                                      StrideC,
                                      KBatch,
                                      a_element_op,
                                      b_element_op,
                                      c_element_op);

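    // KBatch > 1 enables the split-K decomposition referenced in the commit
    // log ("fixed splitk", "weight permute with splitki").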
    if(!gemm.IsSupportedArgument(argument))
    {
        std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;

        // Treat an unsupported configuration as a pass so the example exits
        // cleanly instead of reporting a failure.
        return true;
    }

    bool pass = true;
    if(config.do_verification)
    {
        auto ref_gemm    = ReferenceGemmInstance{};
        auto ref_invoker = ref_gemm.MakeInvoker();

        auto ref_argument = ref_gemm.MakeArgument(
            a_m_k, b_k_n, c_m_n_host_result, PassThrough{}, PassThrough{}, PassThrough{});

        ref_invoker.Run(ref_argument);

        ave_time = invoker.Run(argument, StreamConfig{nullptr, false, 0});
        c_m_n_device_buf.FromDevice(c_m_n_device_result.mData.data());

        pass &= ck::utils::check_err(c_m_n_device_result,
                                     c_m_n_host_result,
                                     "Error: Incorrect results!",
                                     get_rtol<CDataType>(),
                                     get_atol<CDataType>());
    }

    if(config.time_kernel)
    {
        ave_time =
            invoker.Run(argument, StreamConfig{nullptr, config.time_kernel, 0, 20, 50, true, 50});

        std::size_t flop = 2_uz * M * N * K;
        std::size_t num_btype =
            sizeof(ADataType) * M * K +
            sizeof(BDataType) * K * N /
                (ck::is_same_v<ck::remove_cvref_t<BDataType>, ck::pk_i4_t> ? 2 : 1) +
            sizeof(CDataType) * M * N;

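        // B contributes half a byte per element when BDataType is pk_i4_t,
        // since each byte packs two int4 weights.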
        float tflops = static_cast<float>(flop) / 1.E9 / ave_time;

        float gb_per_sec = num_btype / 1.E6 / ave_time;

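        // ave_time is in ms: flop / 1e9 / ms yields TFLOPS, bytes / 1e6 / ms yields GB/s.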
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec | ||
<< " GB/s, " << gemm.GetTypeString() << std::endl; | ||
} | ||
return pass; | ||
} | ||
|
||
bool run_gemm_splitk_example(int argc, char* argv[])
{
    ProblemSizeSplitK problem_size;
    ExecutionConfig config;

    return parse_cmd_args(argc, argv, problem_size, config) && run_gemm(problem_size, config);
}

int main(int argc, char* argv[]) { return !run_gemm_splitk_example(argc, argv); }
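The example follows the usual CK example harness: parse_cmd_args (presumably provided via the shared common.hpp header) fills ProblemSizeSplitK and ExecutionConfig from the command line, and main negates the boolean result so a passing run exits with status 0.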