Commit

Initial commit to resolve merge conflicts
jayfurmanek committed Dec 6, 2023
1 parent 30d0517 commit d011573
Showing 6 changed files with 0 additions and 80 deletions.
third_party/xla/xla/service/gpu/BUILD (4 changes: 0 additions & 4 deletions)
@@ -5035,14 +5035,10 @@ xla_cc_test(
xla_cc_test(
name = "determinism_test",
srcs = ["determinism_test.cc"],
<<<<<<< HEAD
tags = tf_cuda_tests_tags() + ["no_rocm"],
=======
local_defines = if_cuda_is_configured(["GOOGLE_CUDA=1"]) + if_rocm_is_configured([
"TENSORFLOW_USE_ROCM=1",
]),
tags = tf_gpu_tests_tags(),
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51
deps = [
":autotuner_util",
"//xla:literal",
third_party/xla/xla/service/gpu/buffer_comparator_test.cc (6 changes: 0 additions & 6 deletions)
@@ -39,19 +39,13 @@ namespace {
class BufferComparatorTest : public testing::Test {
protected:
BufferComparatorTest()
<<<<<<< HEAD
: platform_(
se::MultiPlatformManager::PlatformWithName(PLATFORM).ValueOrDie()),
stream_exec_(platform_->ExecutorForDevice(0).value()) {}
=======
#if GOOGLE_CUDA
: platform_(se::MultiPlatformManager::PlatformWithName("CUDA").value()),
#elif TENSORFLOW_USE_ROCM
: platform_(se::MultiPlatformManager::PlatformWithName("ROCM").value()),
#endif
stream_exec_(platform_->ExecutorForDevice(0).value()) {
}
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51

// Take floats only for convenience. Still uses ElementType internally.
template <typename ElementType>
third_party/xla/xla/stream_executor/device_description.h (42 changes: 0 additions & 42 deletions)
@@ -159,25 +159,15 @@ class RocmComputeCapability {
return absl::StrJoin(kSupportedGfxVersions, ", ");
}

<<<<<<< HEAD
bool has_nhwc_layout_support() const {
=======
bool gfx9_mi100_or_later() const {
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51
static constexpr absl::string_view kList[] = {"gfx908", "gfx90a", "gfx940",
"gfx941", "gfx942"};
return absl::c_count(kList, gfx_version()) != 0;
}

<<<<<<< HEAD
bool has_bf16_dtype_support() const {
static constexpr absl::string_view kList[] = {"gfx908", "gfx90a", "gfx940",
"gfx941", "gfx942"};
=======
bool gfx9_mi200_or_later() const {
static constexpr absl::string_view kList[] = {"gfx90a", "gfx940", "gfx941",
"gfx942"};
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51
return absl::c_count(kList, gfx_version()) != 0;
}

@@ -190,25 +180,6 @@ class RocmComputeCapability {
bool has_bf16_dtype_support() const { return gfx9_mi100_or_later(); }

bool has_fast_fp16_support() const {
<<<<<<< HEAD
static constexpr absl::string_view kList[] = {"gfx906", "gfx908", "gfx90a",
"gfx940", "gfx941", "gfx942",
"gfx1030", "gfx1100"};
return absl::c_count(kList, gfx_version()) != 0;
}

bool has_mfma_instr_support() const {
static constexpr absl::string_view kList[] = {"gfx908", "gfx90a", "gfx940",
"gfx941", "gfx942"};
return absl::c_count(kList, gfx_version()) != 0;
}

bool has_fp16_atomics_support() const {
// TODO(rocm): Check. This should be the same as has_fast_fp16_support().
static constexpr absl::string_view kList[] = {"gfx90a", "gfx940", "gfx941",
"gfx942"};
return absl::c_count(kList, gfx_version()) != 0;
=======
return gfx9_mi100_or_later() || navi21() || navi31();
}

@@ -217,7 +188,6 @@ class RocmComputeCapability {
bool has_fp16_atomics_support() const {
// TODO(rocm): Check. This should be the same as has_fast_fp16_support().
return gfx9_mi200_or_later();
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51
}

bool fence_before_barrier() const {
@@ -240,25 +210,13 @@ class RocmComputeCapability {
std::string gcn_arch_name_ = "gfx000"; // default to invalid arch.

static constexpr absl::string_view kSupportedGfxVersions[]{
<<<<<<< HEAD
"gfx900", // MI25
"gfx906", // MI50 / MI60
"gfx908", // MI100
"gfx90a", // MI200
"gfx940", // MI300
"gfx941", // MI300
"gfx942", // MI300
"gfx1030", // Navi21
"gfx1100" // Navi31
=======
"gfx900", // MI25
"gfx906", // MI50 / MI60
"gfx908", // MI100
"gfx90a", // MI200
"gfx940", "gfx941", "gfx942",
"gfx1030", // Navi21
"gfx1100" // Navi31
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51
};
};

third_party/xla/xla/stream_executor/rocm/hip_blas_lt.cc (19 changes: 0 additions & 19 deletions)
@@ -478,24 +478,6 @@ tsl::Status BlasLt::MatmulPlan::ExecuteOnStream(
}

// Other data types:
<<<<<<< HEAD
TYPED_MATMUL(float, HIP_R_16BF, HIP_R_16BF, HIP_R_16BF,
HIP_R_16BF)
TYPED_MATMUL(float, HIP_R_16F, HIP_R_16F, HIP_R_16F,
HIP_R_16F)
TYPED_MATMUL(float, HIP_R_16BF, HIP_R_16BF, HIP_R_32F,
HIP_R_32F)
TYPED_MATMUL(float, HIP_R_16F, HIP_R_16F, HIP_R_32F,
HIP_R_32F)
TYPED_MATMUL(float, HIP_R_32F, HIP_R_32F, HIP_R_32F,
HIP_R_32F)
TYPED_MATMUL(double, HIP_R_64F, HIP_R_64F, HIP_R_64F,
HIP_R_64F)
TYPED_MATMUL(complex64, HIP_C_32F, HIP_C_32F, HIP_C_32F,
HIP_C_32F)
TYPED_MATMUL(complex128, HIP_C_64F, HIP_C_64F, HIP_C_64F,
HIP_C_64F)
=======
TYPED_MATMUL(float, HIP_R_16BF, HIP_R_16BF, HIP_R_16BF, HIP_R_16BF)
TYPED_MATMUL(float, HIP_R_16F, HIP_R_16F, HIP_R_16F, HIP_R_16F)
TYPED_MATMUL(float, HIP_R_16BF, HIP_R_16BF, HIP_R_32F, HIP_R_32F)
@@ -504,7 +486,6 @@ tsl::Status BlasLt::MatmulPlan::ExecuteOnStream(
TYPED_MATMUL(double, HIP_R_64F, HIP_R_64F, HIP_R_64F, HIP_R_64F)
TYPED_MATMUL(complex64, HIP_C_32F, HIP_C_32F, HIP_C_32F, HIP_C_32F)
TYPED_MATMUL(complex128, HIP_C_64F, HIP_C_64F, HIP_C_64F, HIP_C_64F)
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51

#undef TYPED_MATMUL

third_party/xla/xla/stream_executor/rocm/hip_blas_lt.h (5 changes: 0 additions & 5 deletions)
@@ -72,12 +72,7 @@ class BlasLt : public gpu::BlasLt {
hipblasLtMatmulDesc_t get() const { return handle_.get(); }

private:
<<<<<<< HEAD
MatmulDesc(hipblasLtMatmulDesc_t handle,
hipblasComputeType_t compute_type,
=======
MatmulDesc(hipblasLtMatmulDesc_t handle, hipblasComputeType_t compute_type,
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51
hipDataType datatype)
: handle_(handle, wrap::hipblasLtMatmulDescDestroy),
compute_type_(compute_type),
third_party/xla/xla/tests/BUILD (4 changes: 0 additions & 4 deletions)
@@ -777,13 +777,9 @@ xla_test(
xla_test(
name = "array_elementwise_ops_test",
srcs = ["array_elementwise_ops_test.cc"],
<<<<<<< HEAD
tags = ["no_rocm"],
=======
local_defines = if_cuda_is_configured(["GOOGLE_CUDA=1"]) + if_rocm_is_configured([
"TENSORFLOW_USE_ROCM=1",
]),
>>>>>>> db579439eef970657f5ddbf05dc9b798cb748c51
shard_count = 25,
deps = [
":client_library_test_base",