Skip to content

Commit

Permalink
Make GetDebugOptionsForTest const.
Browse files Browse the repository at this point in the history
GetDebugOptionsForTest does not need mutating access to `this`.

PiperOrigin-RevId: 688974471
  • Loading branch information
nvgrw authored and Google-ML-Automation committed Oct 23, 2024
1 parent 1bd1fdd commit dc3c3bd
Show file tree
Hide file tree
Showing 62 changed files with 100 additions and 98 deletions.
2 changes: 1 addition & 1 deletion xla/hlo/testlib/hlo_hardware_independent_test_base.cc
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ void HloHardwareIndependentTestBase::SetAotFastMathDebugOptions(
options->set_xla_cpu_fast_math_honor_division(false);
}

DebugOptions HloHardwareIndependentTestBase::GetDebugOptionsForTest() {
DebugOptions HloHardwareIndependentTestBase::GetDebugOptionsForTest() const {
auto debug_options = GetDebugOptionsFromFlags();
// TODO(b/38354253): Change tests to use Parameters instead of Constants.
debug_options.add_xla_disable_hlo_passes("constant_folding");
Expand Down
2 changes: 1 addition & 1 deletion xla/hlo/testlib/hlo_hardware_independent_test_base.h
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ class HloHardwareIndependentTestBase : public ::testing::Test {
//
// This function is virtual so tests can specify an alternative set of debug
// options (e.g. disabling additional passes).
virtual DebugOptions GetDebugOptionsForTest();
virtual DebugOptions GetDebugOptionsForTest() const;

// Gets an HloModuleConfig with options appropriate for tests.
HloModuleConfig GetModuleConfigForTest(int64_t replica_count = 1,
Expand Down
2 changes: 1 addition & 1 deletion xla/service/cpu/tests/cpu_fusion_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ class CpuFusionTest : public HloTestBase {
ErrorSpec error_spec_{0.0001, 1e-5};

private:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.add_xla_disable_hlo_passes("layout-assignment");
return debug_options;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/cpu/tests/cpu_intrinsic_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ class CpuUnaryIntrinsicTest
}

private:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
HloTestBase::SetAotFastMathDebugOptions(&debug_options);
return debug_options;
Expand Down
4 changes: 2 additions & 2 deletions xla/service/cpu/tests/cpu_vectorization_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ class CpuVectorizationTest
}

private:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
HloTestBase::SetAotFastMathDebugOptions(&debug_options);
return debug_options;
Expand Down Expand Up @@ -209,7 +209,7 @@ class JitVectorizationTest
}

private:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
JitVectorizationTestSpec spec = GetParam();
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_max_isa(spec.max_isa);
Expand Down
2 changes: 1 addition & 1 deletion xla/service/cpu/tests/onednn_convolution_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ namespace cpu {

class ConvolutionTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/cpu/tests/onednn_layer_norm_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ namespace {

class LayerNormTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/cpu/tests/onednn_matmul_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ namespace cpu {

class MatmulTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/cpu/tests/onednn_softmax_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ class OneDnnSoftmaxTest
: public HloTestBase,
public ::testing::WithParamInterface<std::tuple<PrimitiveType, int>> {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/elemental_ir_emitter_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ class ElementalIrEmitterExecutionTest : public HloTestBase {
class ElementalIrEmitterExecutionTestWithoutFastMinMax
: public ElementalIrEmitterExecutionTest {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
ElementalIrEmitterExecutionTest::GetDebugOptionsForTest();
debug_options.set_xla_cpu_enable_fast_min_max(false);
Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/autotuning/gemm_algorithm_picker_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ class GemmAlgorithmPickerTest : public HloTestBase,
public:
GemmAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }

DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cublaslt(GetParam());
debug_options.set_xla_gpu_enable_triton_gemm(false);
Expand Down
10 changes: 5 additions & 5 deletions xla/service/gpu/autotuning/gemm_fusion_autotuner_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,7 @@ TEST_F(StatelessAutotunerTest,

class GemmFusionAutotunerTest : public StatelessAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(true);
Expand Down Expand Up @@ -349,7 +349,7 @@ class GemmFusionAutotunerTest : public StatelessAutotunerTest {
class GemmFusionAutotunerTestWithMorePreciseReduction
: public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
Expand Down Expand Up @@ -850,7 +850,7 @@ ENTRY e {
class GemmFusionAutotunerLevelTest : public StatelessAutotunerTest,
public ::testing::WithParamInterface<int> {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_autotune_level(GetParam());
Expand Down Expand Up @@ -937,7 +937,7 @@ INSTANTIATE_TEST_SUITE_P(GemmFusionAutotunerLevelSweep,

class GemmFusionAutotunerExhaustiveTest : public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_exhaustive_tiling_search(true);
Expand Down Expand Up @@ -995,7 +995,7 @@ ENTRY e {

class GemmFusionAutotunerDisableSplitK : public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_split_k_autotuning(false);
Expand Down
4 changes: 3 additions & 1 deletion xla/service/gpu/determinism_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,9 @@ class DeterminismTest : public GpuCodegenTest {
}
}

DebugOptions GetDebugOptionsForTest() override { return debug_options_; }
DebugOptions GetDebugOptionsForTest() const override {
return debug_options_;
}

DebugOptions debug_options_;

Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/dot_algorithm_support_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ class DotAlgorithmSupportTest
return GetDeviceDescription().gpu_compute_capability();
}

DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
// Setting this explicitly to make sure that we also test the case when the
// dot's dimensions are under the rewrite size threshold:
Expand Down
4 changes: 2 additions & 2 deletions xla/service/gpu/float_support_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class FloatSupportTest : public HloTestBase {

class FloatSupportTestWithCublas : public FloatSupportTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = FloatSupportTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
Expand All @@ -48,7 +48,7 @@ class FloatSupportTestWithCublas : public FloatSupportTest {

class FloatSupportTestWithTriton : public FloatSupportTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = FloatSupportTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(true);
debug_options.set_xla_gpu_triton_gemm_any(true);
Expand Down
12 changes: 6 additions & 6 deletions xla/service/gpu/fusions/cudnn_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ namespace {

class CuDnnFusionTest : public GpuCodegenTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
// Let this group of tests just use the first available plan, skipping
// autotuning.
Expand Down Expand Up @@ -97,7 +97,7 @@ class CuDnnFusionFileCheckTest : public CuDnnFusionTest {
}
}

DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions options = CuDnnFusionTest::GetDebugOptionsForTest();
options.set_xla_dump_to(output_directory_);
return options;
Expand Down Expand Up @@ -556,7 +556,7 @@ ENTRY e {

class CuDnnFusionCommandBufferTest : public CuDnnFusionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = CuDnnFusionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_graph_min_graph_size(1);
return debug_options;
Expand Down Expand Up @@ -611,7 +611,7 @@ ENTRY e {

class CuDnnFusionLevel2Test : public CuDnnFusionExecutionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
CuDnnFusionExecutionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(2);
Expand Down Expand Up @@ -836,7 +836,7 @@ ENTRY e {

class CuDnnFusionLevel3Test : public CuDnnFusionExecutionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options =
CuDnnFusionExecutionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(3);
Expand Down Expand Up @@ -1093,7 +1093,7 @@ INSTANTIATE_TEST_SUITE_P(SelectTestSuite, SelectTest,

class CuDnnFusionRewriteTest : public CuDnnFusionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = CuDnnFusionTest::GetDebugOptionsForTest();
// Reset autotuning level to default.
debug_options.set_xla_gpu_autotune_level(
Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/fusions/legacy/concatenate_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ namespace {

class ConcatenateTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ namespace {

class InPlaceDynamicUpdateSliceFusionTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/fusions/legacy/input_slices_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ namespace {

class InputSlicesTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/fusions/legacy/scatter_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ namespace gpu {
namespace {

class ScatterFusionTest : public HloTestBase {
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
Expand Down
2 changes: 1 addition & 1 deletion xla/service/gpu/fusions/legacy/transpose_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ namespace {

class TransposeTest : public HloTestBase {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
auto opts = HloTestBase::GetDebugOptionsForTest();
opts.set_xla_gpu_mlir_emitter_level(0);
return opts;
Expand Down
14 changes: 7 additions & 7 deletions xla/service/gpu/fusions/triton/dot_algorithms_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ namespace {

class AlgorithmTest : public GpuCodegenTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.set_xla_dump_to("sponge");
debug_options.set_xla_dump_hlo_pass_re(".*");
Expand Down Expand Up @@ -93,7 +93,7 @@ class AlgorithmTest : public GpuCodegenTest {
// algorithm.
class Triton6xBF16GemmTest : public AlgorithmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = AlgorithmTest::GetDebugOptionsForTest();
// These 2 flags are not strictly necessary now, but we're adding them to be
// on the safe side against future flakiness.
Expand Down Expand Up @@ -121,7 +121,7 @@ class Triton6xBF16GemmTest : public AlgorithmTest {
// flag once we support the algorithm values through the entire stack.
class Triton6xBF16GemmTestWithFlag : public AlgorithmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = AlgorithmTest::GetDebugOptionsForTest();
// Do not fall back to cuBLAS, we are testing Triton.
debug_options.set_xla_gpu_cublas_fallback(false);
Expand All @@ -136,7 +136,7 @@ class Triton6xBF16GemmTestWithFlag : public AlgorithmTest {

class BlasAlgorithmTest : public AlgorithmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = AlgorithmTest::GetDebugOptionsForTest();
// Do not autotune split-k by default, since this prevents deterministically
// matching the optimized HLO.
Expand All @@ -148,7 +148,7 @@ class BlasAlgorithmTest : public AlgorithmTest {

class TritonAlgorithmTest : public AlgorithmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = AlgorithmTest::GetDebugOptionsForTest();
// Do not fall back to cuBLAS, we are testing Triton.
debug_options.set_xla_gpu_cublas_fallback(false);
Expand Down Expand Up @@ -711,7 +711,7 @@ CHECK-NOT: mma.sync.aligned.{{.*}}.row.col.f32.tf32.tf32.f32
// algorithm.
class Triton3xBF16GemmTest : public AlgorithmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = AlgorithmTest::GetDebugOptionsForTest();
// These 2 flags are not strictly necessary now, but we're adding them to be
// on the safe side against future flakiness.
Expand All @@ -734,7 +734,7 @@ class Triton3xBF16GemmTest : public AlgorithmTest {
// flag once we support the algorithm values through the entire stack.
class Triton3xBF16GemmTestWithFlag : public AlgorithmTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions GetDebugOptionsForTest() const override {
DebugOptions debug_options = AlgorithmTest::GetDebugOptionsForTest();
// Enable triton fusion for all supported GEMMs.
debug_options.set_xla_gpu_triton_gemm_any(true);
Expand Down
Loading

0 comments on commit dc3c3bd

Please sign in to comment.