Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

New fill kernel and validation tolerances #251

Merged
merged 4 commits into from
Aug 1, 2024
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
96 changes: 96 additions & 0 deletions library/src/data_types.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,102 @@ namespace hiptensor
return;
}
}

std::string computeTypeToString(hiptensorComputeType_t computeType)
{
if(computeType == HIPTENSOR_COMPUTE_16BF)
{
return "HIPTENSOR_COMPUTE_16BF";
}
else if(computeType == HIPTENSOR_COMPUTE_16F)
{
return "HIPTENSOR_COMPUTE_16F";
}
else if(computeType == HIPTENSOR_COMPUTE_32F)
{
return "HIPTENSOR_COMPUTE_32F";
}
else if(computeType == HIPTENSOR_COMPUTE_64F)
{
return "HIPTENSOR_COMPUTE_64F";
}
else if(computeType == HIPTENSOR_COMPUTE_8I)
{
return "HIPTENSOR_COMPUTE_8I";
}
else if(computeType == HIPTENSOR_COMPUTE_8U)
{
return "HIPTENSOR_COMPUTE_8U";
}
else if(computeType == HIPTENSOR_COMPUTE_32I)
{
return "HIPTENSOR_COMPUTE_32I";
}
else if(computeType == HIPTENSOR_COMPUTE_32U)
{
return "HIPTENSOR_COMPUTE_32U";
}
else if(computeType == HIPTENSOR_COMPUTE_C32F)
{
return "HIPTENSOR_COMPUTE_C32F";
}
else if(computeType == HIPTENSOR_COMPUTE_C64F)
{
return "HIPTENSOR_COMPUTE_C64F";
}
else
{
return "HIPTENSOR_COMPUTE_NONE";
}
}

std::string hipTypeToString(hipDataType hipType)
{
if(hipType == HIP_R_16BF)
{
return "HIP_R_16BF";
}
else if(hipType == HIP_R_16F)
{
return "HIP_R_16F";
}
else if(hipType == HIP_R_32F)
{
return "HIP_R_32F";
}
else if(hipType == HIP_R_64F)
{
return "HIP_R_64F";
}
else if(hipType == HIP_R_8I)
{
return "HIP_R_8I";
}
else if(hipType == HIP_R_8U)
{
return "HIP_R_8U";
}
else if(hipType == HIP_R_32I)
{
return "HIP_R_32I";
}
else if(hipType == HIP_R_32U)
{
return "HIP_R_32U";
}
else if(hipType == HIP_C_32F)
{
return "HIP_C_32F";
}
else if(hipType == HIP_C_64F)
{
return "HIP_C_64F";
}
else
{
return "HIP_TYPE_NONE";
}
}
} // namespace hiptensor

bool operator==(hipDataType hipType, hiptensorComputeType_t computeType)
Expand Down
3 changes: 3 additions & 0 deletions library/src/include/data_types.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,9 @@ namespace hiptensor
T readVal(void const* value, hiptensorComputeType_t id);

void writeVal(void const* addr, hiptensorComputeType_t id, ScalarData value);

std::string computeTypeToString(hiptensorComputeType_t computeType);
std::string hipTypeToString(hipDataType hipType);
} // namespace hiptensor

bool operator==(hipDataType hipType, hiptensorComputeType_t computeType);
Expand Down
73 changes: 51 additions & 22 deletions test/01_contraction/contraction_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -257,14 +257,17 @@ namespace hiptensor
auto resource = getResource();
resource->resizeStorage(lengths, elementBytes);

uint32_t seed = static_cast<uint32_t>(256);

if(ADataType == HIP_R_16F && BDataType == HIP_R_16F && DDataType == HIP_R_16F)
{
// Initialize matrix data on device
fillLaunchKernel<_Float16>((_Float16*)resource->deviceA().get(), elementsA);
fillLaunchKernel<_Float16>((_Float16*)resource->deviceB().get(), elementsB);
fillLaunchKernel<_Float16>((_Float16*)resource->deviceA().get(), elementsA, seed - 1);
fillLaunchKernel<_Float16>((_Float16*)resource->deviceB().get(), elementsB, seed);
if(CDataType == HIP_R_16F)
{
fillLaunchKernel<_Float16>((_Float16*)resource->deviceC().get(), elementsCD);
printf("Filling C\n\n");
dlangbe marked this conversation as resolved.
Show resolved Hide resolved
fillLaunchKernel<_Float16>((_Float16*)resource->deviceC().get(), elementsCD, seed + 1);
}
fillValLaunchKernel<_Float16>((_Float16*)resource->deviceD().get(),
elementsCD,
Expand All @@ -273,12 +276,12 @@ namespace hiptensor
else if(ADataType == HIP_R_16BF && BDataType == HIP_R_16BF && DDataType == HIP_R_16BF)
{
// Initialize matrix data on device
fillLaunchKernel<hip_bfloat16>((hip_bfloat16*)resource->deviceA().get(), elementsA);
fillLaunchKernel<hip_bfloat16>((hip_bfloat16*)resource->deviceB().get(), elementsB);
fillLaunchKernel<hip_bfloat16>((hip_bfloat16*)resource->deviceA().get(), elementsA, seed - 1);
fillLaunchKernel<hip_bfloat16>((hip_bfloat16*)resource->deviceB().get(), elementsB, seed);
if(CDataType == HIP_R_16BF)
{
fillLaunchKernel<hip_bfloat16>((hip_bfloat16*)resource->deviceC().get(),
elementsCD);
elementsCD, seed + 1);
}
fillValLaunchKernel<hip_bfloat16>(
(hip_bfloat16*)resource->deviceD().get(),
Expand All @@ -288,11 +291,11 @@ namespace hiptensor
else if(ADataType == HIP_R_32F && BDataType == HIP_R_32F && DDataType == HIP_R_32F)
{
// Initialize matrix data on device
fillLaunchKernel<float>((float*)resource->deviceA().get(), elementsA);
fillLaunchKernel<float>((float*)resource->deviceB().get(), elementsB);
fillLaunchKernel<float>((float*)resource->deviceA().get(), elementsA, seed - 1);
fillLaunchKernel<float>((float*)resource->deviceB().get(), elementsB, seed);
if(CDataType == HIP_R_32F)
{
fillLaunchKernel<float>((float*)resource->deviceC().get(), elementsCD);
fillLaunchKernel<float>((float*)resource->deviceC().get(), elementsCD, seed + 1);
}
fillValLaunchKernel<float>((float*)resource->deviceD().get(),
elementsCD,
Expand All @@ -301,11 +304,11 @@ namespace hiptensor
else if(ADataType == HIP_R_64F && BDataType == HIP_R_64F && DDataType == HIP_R_64F)
{
// Initialize matrix data on device
fillLaunchKernel<double>((double*)resource->deviceA().get(), elementsA);
fillLaunchKernel<double>((double*)resource->deviceB().get(), elementsB);
fillLaunchKernel<double>((double*)resource->deviceA().get(), elementsA, seed - 1);
fillLaunchKernel<double>((double*)resource->deviceB().get(), elementsB, seed);
if(CDataType == HIP_R_64F)
{
fillLaunchKernel<double>((double*)resource->deviceC().get(), elementsCD);
fillLaunchKernel<double>((double*)resource->deviceC().get(), elementsCD, seed + 1);
}
fillValLaunchKernel<double>((double*)resource->deviceD().get(),
elementsCD,
Expand All @@ -315,13 +318,13 @@ namespace hiptensor
{
// Initialize matrix data on device
fillLaunchKernel<hipFloatComplex>((hipFloatComplex*)resource->deviceA().get(),
elementsA);
elementsA, seed - 1);
fillLaunchKernel<hipFloatComplex>((hipFloatComplex*)resource->deviceB().get(),
elementsB);
elementsB, seed);
if(CDataType == HIP_C_32F)
{
fillLaunchKernel<hipFloatComplex>((hipFloatComplex*)resource->deviceC().get(),
elementsCD);
elementsCD, seed + 1);
}
fillValLaunchKernel<hipFloatComplex>(
(hipFloatComplex*)resource->deviceD().get(),
Expand All @@ -332,13 +335,13 @@ namespace hiptensor
{
// Initialize matrix data on device
fillLaunchKernel<hipDoubleComplex>((hipDoubleComplex*)resource->deviceA().get(),
elementsA);
elementsA, seed - 1);
fillLaunchKernel<hipDoubleComplex>((hipDoubleComplex*)resource->deviceB().get(),
elementsB);
elementsB, seed);
if(CDataType == HIP_C_64F)
{
fillLaunchKernel<hipDoubleComplex>((hipDoubleComplex*)resource->deviceC().get(),
elementsCD);
elementsCD, seed + 1);
}
fillValLaunchKernel<hipDoubleComplex>(
(hipDoubleComplex*)resource->deviceD().get(),
Expand Down Expand Up @@ -656,13 +659,36 @@ namespace hiptensor
auto reference = resource->allocDevice(sizeD);
resource->copyData(reference, resource->hostD(), sizeD);

// Compute tolerance based on compute type
auto dimension = a_ms_ks.mLengths.size() / 2;
auto nelems_k = std::accumulate(a_ms_ks.mLengths.begin() + dimension,
a_ms_ks.mLengths.end(),
size_t{1},
std::multiplies<size_t>());

auto eps = getEpsilon(computeType);
CongMa13 marked this conversation as resolved.
Show resolved Hide resolved
double tolerance = 2 * nelems_k * eps;

// use the same default tolerance value as CK
if (computeType == HIPTENSOR_COMPUTE_16BF || DDataType == HIP_R_16BF)
{
const double epsilon = std::pow(2, -7);
tolerance += epsilon * 2;
}
else if (computeType == HIPTENSOR_COMPUTE_16F || DDataType == HIP_R_16F)
{
const double epsilon = std::pow(2, -10);
tolerance += epsilon * 2;
}

if(DDataType == HIP_R_16F)
{
std::tie(mValidationResult, mMaxRelativeError)
= compareEqualLaunchKernel<_Float16>((_Float16*)resource->deviceD().get(),
(_Float16*)reference.get(),
elementsCD,
computeType);
computeType,
tolerance);
}
else if(DDataType == HIP_R_16BF)
{
Expand All @@ -671,23 +697,26 @@ namespace hiptensor
(hip_bfloat16*)resource->deviceD().get(),
(hip_bfloat16*)reference.get(),
elementsCD,
computeType);
computeType,
tolerance);
}
else if(DDataType == HIP_R_32F || DDataType == HIP_C_32F)
{
std::tie(mValidationResult, mMaxRelativeError)
= compareEqualLaunchKernel<float>((float*)resource->deviceD().get(),
(float*)reference.get(),
elementsCD,
computeType);
computeType,
tolerance);
}
else if(DDataType == HIP_R_64F || DDataType == HIP_C_64F)
{
std::tie(mValidationResult, mMaxRelativeError)
= compareEqualLaunchKernel<double>((double*)resource->deviceD().get(),
(double*)reference.get(),
elementsCD,
computeType);
computeType,
tolerance);
}

EXPECT_TRUE(mValidationResult) << "Max relative error: " << mMaxRelativeError;
Expand Down
6 changes: 4 additions & 2 deletions test/02_permutation/permutation_resource.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -96,13 +96,15 @@ namespace hiptensor

void PermutationResource::fillRandToA()
{
uint32_t seed = static_cast<uint32_t>(256);

if(mCurrentDataType == HIP_R_32F)
{
fillLaunchKernel<float>((float*)deviceA().get(), mCurrentMatrixElement);
fillLaunchKernel<float>((float*)deviceA().get(), mCurrentMatrixElement, seed);
}
else
{
fillLaunchKernel<_Float16>((_Float16*)deviceA().get(), mCurrentMatrixElement);
fillLaunchKernel<_Float16>((_Float16*)deviceA().get(), mCurrentMatrixElement, seed);
}
Base::copyData(hostA(), deviceA(), getCurrentMatrixMemorySize());
}
Expand Down
10 changes: 6 additions & 4 deletions test/03_reduction/reduction_resource.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -127,21 +127,23 @@ namespace hiptensor

void ReductionResource::fillRand(HostPtrT& hostBuf, DevicePtrT& deviceBuf, size_t elementCount)
{
uint32_t seed = static_cast<uint32_t>(256);

if(mCurrentDataType == HIP_R_16F)
{
fillLaunchKernel<float16_t>((float16_t*)deviceBuf.get(), elementCount);
fillLaunchKernel<float16_t>((float16_t*)deviceBuf.get(), elementCount, seed);
}
else if(mCurrentDataType == HIP_R_16BF)
{
fillLaunchKernel<bfloat16_t>((bfloat16_t*)deviceBuf.get(), elementCount);
fillLaunchKernel<bfloat16_t>((bfloat16_t*)deviceBuf.get(), elementCount, seed);
}
else if(mCurrentDataType == HIP_R_32F)
{
fillLaunchKernel<float32_t>((float32_t*)deviceBuf.get(), elementCount);
fillLaunchKernel<float32_t>((float32_t*)deviceBuf.get(), elementCount, seed);
}
else if(mCurrentDataType == HIP_R_64F)
{
fillLaunchKernel<float64_t>((float64_t*)deviceBuf.get(), elementCount);
fillLaunchKernel<float64_t>((float64_t*)deviceBuf.get(), elementCount, seed);
}
Base::copyData(hostBuf, deviceBuf, elementCount);
}
Expand Down
23 changes: 19 additions & 4 deletions test/device/common.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,28 +65,43 @@ __global__ static void
}
}

// PCG-style 32-bit integer hash: one LCG step followed by an output
// permutation (shift amount derived from the high bits, xor, multiply,
// final xor-shift). Deterministic for a given input, so fills are
// reproducible across runs.
__device__ inline unsigned pcg_hash(unsigned input)
{
    unsigned const lcgState = input * 747796405u + 2891336453u;
    unsigned const permuted = ((lcgState >> ((lcgState >> 28u) + 4u)) ^ lcgState) * 277803737u;
    return (permuted >> 22u) ^ permuted;
}

// Generates a pseudo-random float in roughly [-range, range): the input is
// hashed, normalized by UINT_MAX into [0, 1], shifted to be centered on zero,
// then scaled to the requested half-width. NOTE(review): when the hash hits
// UINT_MAX the result can round to exactly +range, so the upper bound is
// effectively inclusive in edge cases — acceptable for test-data fills.
template <unsigned range = 1>
__device__ inline float gen_random_float(unsigned input)
{
    float const unit = static_cast<float>(pcg_hash(input)) / static_cast<float>(UINT_MAX);
    return (unit - 0.5f) * static_cast<float>(range) * 2;
}

// fill kernel for 'elementSize' elements
template <typename DataType>
__global__ void fillKernel(DataType* data, uint32_t elementSize, uint32_t seed)
{
uint32_t index = (blockIdx.x * blockDim.x + threadIdx.x);
uint32_t seededIndex = static_cast<uint32_t>(uint64_t(index + seed) % UINT_MAX);

if(index < elementSize)
{
// Input values scaled by 10, Doing UnarySquare Operation for tensors(16F) may cause overflow.
if constexpr(std::is_same_v<DataType, hipFloatComplex>)
{
auto value = (float(index / float(RAND_MAX) - 0.5) * 10) / elementSize;
auto value = gen_random_float(seededIndex);
data[index] = make_hipFloatComplex(value, value);
}
else if constexpr(std::is_same_v<DataType, hipDoubleComplex>)
{
auto value = (double(index / double(RAND_MAX) - 0.5) * 10) / elementSize;
auto value = static_cast<double>(gen_random_float(seededIndex));
data[index] = make_hipDoubleComplex(value, value);
}
else
{
auto value = (DataType(index / double(RAND_MAX) - 0.5) * 10) / elementSize;
auto value = gen_random_float(seededIndex);
data[index] = static_cast<DataType>(value);
}
}
Expand Down
Loading
Loading