From 0d4a8d656e8427182b11405482d00bfd229f058e Mon Sep 17 00:00:00 2001
From: Taekyung Heo <7621438+TaekyungHeo@users.noreply.github.com>
Date: Tue, 2 Jul 2024 17:20:46 -0400
Subject: [PATCH] Add unit tests for KinetoOperator methods

---
 src/trace_link/kineto_operator.py        |  36 +++----
 tests/trace_link/test_kineto_operator.py | 121 +++++++++++++++++++++++
 2 files changed, 139 insertions(+), 18 deletions(-)

diff --git a/src/trace_link/kineto_operator.py b/src/trace_link/kineto_operator.py
index 7bbf2d05..b7c0ea1b 100644
--- a/src/trace_link/kineto_operator.py
+++ b/src/trace_link/kineto_operator.py
@@ -108,6 +108,24 @@ def is_cuda_driver_op(self) -> bool:
         """
         return self.category == "cuda_driver"
 
+    def is_ac2g_op(self) -> bool:
+        """
+        Check if the operator is categorized as 'ac2g', which stands for arrows from CPU to GPU.
+
+        Excerpt from https://pytorch.org/docs/stable/torch.compiler_profiling_torch_compile.html
+        ```
+        Every kernel on the GPU occurs after being launched by code running on the CPU. The profiler can draw
+        connections (i.e. "flows") between the GPU and CPU events to show which CPU event launched a GPU kernel.
+        This is particularly helpful because, with a few exceptions, GPU kernels are launched asynchronously.
+
+        To view a flow connection, click on a GPU kernel and click "ac2g".
+        ````
+
+        Returns
+            bool: True if the operator is an 'ac2g' type, otherwise False.
+        """
+        return self.category == "ac2g"
+
     def is_kernel_launch_op(self) -> bool:
         """
         Determine whether the operator is a kernel-launching CUDA runtime operator.
@@ -137,21 +155,3 @@ def is_gpu_op(self) -> bool:
         """
         gpu_categories = {"kernel", "gpu_memcpy"}
         return self.category in gpu_categories
-
-    def is_ac2g_op(self) -> bool:
-        """
-        Check if the operator is categorized as 'ac2g', which stands for arrows from CPU to GPU.
-
-        Excerpt from https://pytorch.org/docs/stable/torch.compiler_profiling_torch_compile.html
-        ```
-        Every kernel on the GPU occurs after being launched by code running on the CPU. The profiler can draw
-        connections (i.e. "flows") between the GPU and CPU events to show which CPU event launched a GPU kernel.
-        This is particularly helpful because, with a few exceptions, GPU kernels are launched asynchronously.
-
-        To view a flow connection, click on a GPU kernel and click "ac2g".
-        ````
-
-        Returns
-            bool: True if the operator is an 'ac2g' type, otherwise False.
-        """
-        return self.category == "ac2g"
diff --git a/tests/trace_link/test_kineto_operator.py b/tests/trace_link/test_kineto_operator.py
index 1c5e4ca2..d5984586 100644
--- a/tests/trace_link/test_kineto_operator.py
+++ b/tests/trace_link/test_kineto_operator.py
@@ -49,6 +49,103 @@ def test_repr_method(sample_operator_data):
     assert repr(operator) == expected_repr
 
 
+@pytest.mark.parametrize(
+    "category, expected",
+    [
+        ("cpu_op", True),
+        ("user_annotation", True),
+        ("ProfilerStep", False),
+        ("cuda_runtime", False),
+        ("cuda_driver", False),
+    ],
+)
+def test_is_cpu_op(category, expected):
+    """Test the is_cpu_op method with various inputs."""
+    operator_data = {
+        "cat": category,
+        "name": "someOperation",
+        "ph": "X",
+        "dur": 100,
+        "ts": 1590000000,
+        "tid": 1234,
+        "args": {"External id": "123", "Ev Idx": "456", "stream": 7, "Record function id": 12, "correlation": 99},
+    }
+    operator = KinetoOperator(operator_data)
+    assert operator.is_cpu_op() == expected
+
+
+@pytest.mark.parametrize(
+    "category, expected",
+    [
+        ("cuda_runtime", True),
+        ("kernel", False),
+        ("cuda_driver", False),
+        ("cpu_op", False),
+    ],
+)
+def test_is_cuda_runtime_op(category, expected):
+    """Test the is_cuda_runtime_op method with various inputs."""
+    operator_data = {
+        "cat": category,
+        "name": "someOperation",
+        "ph": "X",
+        "dur": 100,
+        "ts": 1590000000,
+        "tid": 1234,
+        "args": {"External id": "123", "Ev Idx": "456", "stream": 7, "Record function id": 12, "correlation": 99},
+    }
+    operator = KinetoOperator(operator_data)
+    assert operator.is_cuda_runtime_op() == expected
+
+
+@pytest.mark.parametrize(
+    "category, expected",
+    [
+        ("cuda_driver", True),
+        ("kernel", False),
+        ("cuda_runtime", False),
+        ("cpu_op", False),
+    ],
+)
+def test_is_cuda_driver_op(category, expected):
+    """Test the is_cuda_driver_op method with various inputs."""
+    operator_data = {
+        "cat": category,
+        "name": "someOperation",
+        "ph": "X",
+        "dur": 100,
+        "ts": 1590000000,
+        "tid": 1234,
+        "args": {"External id": "123", "Ev Idx": "456", "stream": 7, "Record function id": 12, "correlation": 99},
+    }
+    operator = KinetoOperator(operator_data)
+    assert operator.is_cuda_driver_op() == expected
+
+
+@pytest.mark.parametrize(
+    "category, expected",
+    [
+        ("ac2g", True),
+        ("kernel", False),
+        ("cuda_runtime", False),
+        ("cpu_op", False),
+    ],
+)
+def test_is_ac2g_op(category, expected):
+    """Test the is_ac2g_op method with various inputs."""
+    operator_data = {
+        "cat": category,
+        "name": "someOperation",
+        "ph": "X",
+        "dur": 100,
+        "ts": 1590000000,
+        "tid": 1234,
+        "args": {"External id": "123", "Ev Idx": "456", "stream": 7, "Record function id": 12, "correlation": 99},
+    }
+    operator = KinetoOperator(operator_data)
+    assert operator.is_ac2g_op() == expected
+
+
 @pytest.mark.parametrize(
     "category, name, expected",
     [
@@ -82,3 +179,27 @@ def test_is_kernel_launch_op(category, name, expected):
     }
     operator = KinetoOperator(operator_data)
     assert operator.is_kernel_launch_op() == expected
+
+
+@pytest.mark.parametrize(
+    "category, expected",
+    [
+        ("kernel", True),
+        ("gpu_memcpy", True),
+        ("cuda_runtime", False),
+        ("cpu_op", False),
+    ],
+)
+def test_is_gpu_op(category, expected):
+    """Test the is_gpu_op method with various inputs."""
+    operator_data = {
+        "cat": category,
+        "name": "someOperation",
+        "ph": "X",
+        "dur": 100,
+        "ts": 1590000000,
+        "tid": 1234,
+        "args": {"External id": "123", "Ev Idx": "456", "stream": 7, "Record function id": 12, "correlation": 99},
+    }
+    operator = KinetoOperator(operator_data)
+    assert operator.is_gpu_op() == expected
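
Illustrative usage sketch (not part of the patch above): the new tests construct a KinetoOperator directly from a raw Kineto trace event dict and check one category predicate at a time. The snippet below shows how those same predicates could be combined to bucket the events of a whole trace; the import path and the "traceEvents" key are assumptions based on the test fixtures and the usual Kineto/Chrome trace layout, not something this patch defines.

```
import json

# Assumed import path; adjust to match how the test module imports KinetoOperator.
from src.trace_link.kineto_operator import KinetoOperator


def bucket_trace_events(trace_path: str) -> dict:
    """Group Kineto trace events by the category predicates exercised in the tests."""
    with open(trace_path) as trace_file:
        # Assumes the trace is a JSON object with a "traceEvents" list of event dicts.
        events = json.load(trace_file).get("traceEvents", [])
    ops = [KinetoOperator(event) for event in events if "cat" in event]
    return {
        "cpu": [op for op in ops if op.is_cpu_op()],
        "cuda_runtime": [op for op in ops if op.is_cuda_runtime_op()],
        "cuda_driver": [op for op in ops if op.is_cuda_driver_op()],
        "kernel_launch": [op for op in ops if op.is_kernel_launch_op()],
        "gpu": [op for op in ops if op.is_gpu_op()],
        "ac2g": [op for op in ops if op.is_ac2g_op()],
    }
```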