chore: Removing redundant test cases
Signed-off-by: Anurag Dixit <anurag.dixit@getcruise.com>
Anurag Dixit committed Jul 11, 2023
1 parent 948dc58 commit a47b5fe
Showing 1 changed file with 0 additions and 56 deletions.
56 changes: 0 additions & 56 deletions tests/cpp/test_dynamic_size.cpp
@@ -126,62 +126,6 @@ TEST(Converters, ATenResizeGetItemDynShapeMulCorrectly) {
  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt, 2e-6));
}

TEST(Converters, ATenUnflattenDynShapeShapeCorrectly) {
  const auto graph = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=1]()
      %3 : int = prim::Constant[value=512]()
      %4 : int = prim::Constant[value=1]()
      %5 : int = prim::Constant[value=1]()
      %6 : int[] = prim::ListConstruct(%3, %4, %5)
      %7 : Tensor = aten::unflatten(%x.1, %2, %6)
      return (%7))IR";

  auto g = std::make_shared<torch::jit::Graph>();

  torch::jit::parseIR(graph, g.get());

  auto in = at::randint(0, 10, {1, 512}, {at::kCUDA});

  auto jit_in = at::clone(in);
  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});

  auto trt_in = at::clone(in);
  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);

  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
}

TEST(Converters, ATenUnflattenDynShapeNegativeDimsShapeCorrectly) {
  const auto graph = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=-2]()
      %3 : int = prim::Constant[value=512]()
      %4 : int = prim::Constant[value=1]()
      %5 : int = prim::Constant[value=1]()
      %6 : int[] = prim::ListConstruct(%3, %4, %5)
      %7 : Tensor = aten::unflatten(%x.1, %2, %6)
      return (%7))IR";

  auto g = std::make_shared<torch::jit::Graph>();

  torch::jit::parseIR(graph, g.get());

  auto in = at::randint(0, 10, {1, 512, 2}, {at::kCUDA});

  auto jit_in = at::clone(in);
  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});

  auto trt_in = at::clone(in);
  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true);

  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
}

TEST(Converters, ATenUnflattenDynShapeITensorShapeCorrectly) {
  const auto graph = R"IR(
    graph(%x.1 : Tensor):
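For context, both removed cases follow the converter-test pattern used throughout tests/cpp/test_dynamic_size.cpp: parse a TorchScript IR string, evaluate it with the TorchScript interpreter as a reference, run it again through a TensorRT engine built with dynamic input shapes, and compare the outputs. Below is a condensed sketch of that pattern reconstructed from the removed code; the include paths are assumptions, not taken from this diff.

// Condensed sketch of the dynamic-shape converter test pattern
// removed in this commit (include paths are assumed).
#include <gtest/gtest.h>
#include <torch/csrc/jit/ir/irparser.h>
#include "core/ir/ir.h"
#include "tests/util/util.h"

TEST(Converters, UnflattenDynShapeSketch) {
  // TorchScript IR: unflatten dim 1 of a [1, 512] tensor into shape [512, 1, 1].
  const auto graph = R"IR(
    graph(%x.1 : Tensor):
      %2 : int = prim::Constant[value=1]()
      %3 : int = prim::Constant[value=512]()
      %4 : int = prim::Constant[value=1]()
      %5 : int = prim::Constant[value=1]()
      %6 : int[] = prim::ListConstruct(%3, %4, %5)
      %7 : Tensor = aten::unflatten(%x.1, %2, %6)
      return (%7))IR";

  auto g = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(graph, g.get());

  auto in = at::randint(0, 10, {1, 512}, {at::kCUDA});

  // Reference path: run the parsed graph through the TorchScript interpreter.
  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {at::clone(in)});

  // Path under test: compile and run a TensorRT engine with dynamic input shapes.
  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {at::clone(in)}, true);

  // The engine output should match the interpreter output within tolerance.
  ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
}

The retained ATenUnflattenDynShapeITensorShapeCorrectly case (partially visible above) appears to cover the same aten::unflatten conversion with the target shape supplied dynamically, which is presumably why the constant-shape variants were judged redundant.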
