From a47b5fe0ffe0c783b235bfc212da8a3f1387b307 Mon Sep 17 00:00:00 2001
From: Anurag Dixit
Date: Tue, 11 Jul 2023 15:29:50 -0700
Subject: [PATCH] chore: Removing redundant test cases

Signed-off-by: Anurag Dixit
---
 tests/cpp/test_dynamic_size.cpp | 56 ---------------------------------
 1 file changed, 56 deletions(-)

diff --git a/tests/cpp/test_dynamic_size.cpp b/tests/cpp/test_dynamic_size.cpp
index e8765d6570..c1edff849d 100644
--- a/tests/cpp/test_dynamic_size.cpp
+++ b/tests/cpp/test_dynamic_size.cpp
@@ -126,62 +126,6 @@ TEST(Converters, ATenResizeGetItemDynShapeMulCorrectly) {
   ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt, 2e-6));
 }
 
-TEST(Converters, ATenUnflattenDynShapeShapeCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-        %2 : int = prim::Constant[value=1]()
-        %3 : int = prim::Constant[value=512]()
-        %4 : int = prim::Constant[value=1]()
-        %5 : int = prim::Constant[value=1]()
-        %6 : int[] = prim::ListConstruct(%3, %4, %5)
-        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
-        return (%7))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::randint(0, 10, {1, 512}, {at::kCUDA});
-
-  auto jit_in = at::clone(in);
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
-
-  auto trt_in = at::clone(in);
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
-
-  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
-}
-
-TEST(Converters, ATenUnflattenDynShapeNegativeDimsShapeCorrectly) {
-  const auto graph = R"IR(
-      graph(%x.1 : Tensor):
-        %2 : int = prim::Constant[value=-2]()
-        %3 : int = prim::Constant[value=512]()
-        %4 : int = prim::Constant[value=1]()
-        %5 : int = prim::Constant[value=1]()
-        %6 : int[] = prim::ListConstruct(%3, %4, %5)
-        %7 : Tensor = aten::unflatten(%x.1, %2, %6)
-        return (%7))IR";
-
-  auto g = std::make_shared<torch::jit::Graph>();
-
-  torch::jit::parseIR(graph, g.get());
-
-  auto in = at::randint(0, 10, {1, 512, 2}, {at::kCUDA});
-
-  auto jit_in = at::clone(in);
-  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
-
-  auto trt_in = at::clone(in);
-  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);
-
-  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
-}
-
 TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectly) {
   const auto graph = R"IR(
       graph(%x.1 : Tensor):