Skip to content

Commit

Permalink
Add framework-agnostic tests for common components (#1575)
Browse files Browse the repository at this point in the history
Signed-off-by: yiliu30 <yi4.liu@intel.com>
Co-authored-by: chensuyue <suyue.chen@intel.com>
  • Loading branch information
yiliu30 and chensuyue authored Jan 29, 2024
1 parent f9bc76b commit 9a549c3
Show file tree
Hide file tree
Showing 9 changed files with 178 additions and 29 deletions.
1 change: 1 addition & 0 deletions .azure-pipelines/scripts/ut/3x/run_3x_ort.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverag
inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])')
cd /neural-compressor/test || exit 1
find ./3x/onnxrt/* -name "test*.py" | sed 's,\.\/,coverage run --source='"${inc_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
find ./3x/common/* -name "test*.py" | sed 's,\.\/,coverage run --source='"${inc_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh

LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
Expand Down
1 change: 1 addition & 0 deletions .azure-pipelines/scripts/ut/3x/run_3x_pt.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverag
inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])')
cd /neural-compressor/test || exit 1
find ./3x/torch/* -name "test*.py" | sed 's,\.\/,coverage run --source='"${inc_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
find ./3x/common/* -name "test*.py" | sed 's,\.\/,coverage run --source='"${inc_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh

LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
Expand Down
1 change: 1 addition & 0 deletions .azure-pipelines/scripts/ut/3x/run_3x_tf.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverag
inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])')
cd /neural-compressor/test || exit 1
find ./3x/tensorflow/* -name "test*.py" | sed 's,\.\/,coverage run --source='"${inc_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
find ./3x/common/* -name "test*.py" | sed 's,\.\/,coverage run --source='"${inc_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh

LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
Expand Down
1 change: 1 addition & 0 deletions .azure-pipelines/ut-3x-pt.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ pr:
- neural_compressor/common
- neural_compressor/torch
- test/3x/torch
- test/3x/common
- setup.py
- requirements_pt.txt
- .azure-pipelines/scripts/ut/3x/collect_log_3x.sh
Expand Down
18 changes: 15 additions & 3 deletions neural_compressor/common/base_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,12 +39,13 @@
logger = Logger().get_logger()

__all__ = [
"ConfigRegistry",
"options",
"register_config",
"get_all_config_set_from_config_registry",
"register_supported_configs_for_fwk",
"BaseConfig",
"ConfigRegistry",
"ComposableConfig",
"get_all_config_set_from_config_registry",
"options",
]


Expand Down Expand Up @@ -444,6 +445,17 @@ def get_all_config_set_from_config_registry(fwk_name: str) -> Union[BaseConfig,
return config_set


def register_supported_configs_for_fwk(fwk_name: str):
    """Register the supported configs for a given framework.

    Looks up every config class registered under `fwk_name` and invokes
    its `register_supported_configs` hook.

    Args:
        fwk_name: the framework name.
    """
    for registered_cls in config_registry.get_all_config_cls_by_fwk_name(fwk_name):
        registered_cls.register_supported_configs()


#######################################################
#### Options
#######################################################
Expand Down
5 changes: 2 additions & 3 deletions neural_compressor/onnxrt/quantization/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
import onnx

from neural_compressor.common import Logger
from neural_compressor.common.base_config import BaseConfig, register_config
from neural_compressor.common.base_config import BaseConfig, register_config, register_supported_configs_for_fwk
from neural_compressor.common.utils import DEFAULT_WHITE_LIST, OP_NAME_OR_MODULE_TYPE, RTN

logger = Logger().get_logger()
Expand Down Expand Up @@ -150,8 +150,7 @@ def get_config_set_for_tuning(cls) -> Union[None, "RTNConfig", List["RTNConfig"]
return RTNConfig(weight_bits=[4, 6])


# TODO(Yi) run `register_supported_configs` for all registered config.
RTNConfig.register_supported_configs()
register_supported_configs_for_fwk(fwk_name=FRAMEWORK_NAME)


def get_default_rtn_config() -> RTNConfig:
Expand Down
10 changes: 7 additions & 3 deletions neural_compressor/tensorflow/quantization/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,12 @@

import tensorflow as tf

from neural_compressor.common.base_config import BaseConfig, config_registry, register_config
from neural_compressor.common.base_config import (
BaseConfig,
config_registry,
register_config,
register_supported_configs_for_fwk,
)
from neural_compressor.common.utils import DEFAULT_WHITE_LIST, OP_NAME_OR_MODULE_TYPE, STATIC_QUANT

FRAMEWORK_NAME = "keras"
Expand Down Expand Up @@ -111,8 +116,7 @@ def get_config_set_for_tuning(
return StaticQuantConfig(weight_sym=[True, False])


# TODO(Yi) run `register_supported_configs` for all registered config.
StaticQuantConfig.register_supported_configs()
register_supported_configs_for_fwk(fwk_name=FRAMEWORK_NAME)


def get_all_registered_configs() -> Dict[str, BaseConfig]:
Expand Down
29 changes: 9 additions & 20 deletions neural_compressor/torch/quantization/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,12 @@

import torch

from neural_compressor.common.base_config import BaseConfig, config_registry, register_config
from neural_compressor.common.base_config import (
BaseConfig,
config_registry,
register_config,
register_supported_configs_for_fwk,
)
from neural_compressor.common.utils import (
DEFAULT_WHITE_LIST,
FP8_QUANT,
Expand Down Expand Up @@ -167,10 +172,6 @@ def get_config_set_for_tuning(cls) -> Union[None, "RTNConfig", List["RTNConfig"]
return RTNConfig(weight_bits=[4, 6])


# TODO(Yi) run `register_supported_configs` for all registered config.
RTNConfig.register_supported_configs()


def get_default_rtn_config() -> RTNConfig:
"""Generate the default rtn config.
Expand Down Expand Up @@ -295,10 +296,6 @@ def get_config_set_for_tuning(cls) -> Union[None, "GPTQConfig", List["GPTQConfig
return GPTQConfig(weight_bits=[4, 6])


# TODO(Yi) run `register_supported_configs` for all registered config.
GPTQConfig.register_supported_configs()


def get_default_gptq_config() -> GPTQConfig:
"""Generate the default gptq config.
Expand Down Expand Up @@ -376,10 +373,6 @@ def get_config_set_for_tuning(cls) -> Union[None, "StaticQuantConfig", List["Sta
return StaticQuantConfig(w_sym=[True, False])


# TODO(Yi) run `register_supported_configs` for all registered config.
StaticQuantConfig.register_supported_configs()


def get_default_static_config() -> StaticQuantConfig:
"""Generate the default static quant config.
Expand Down Expand Up @@ -490,10 +483,6 @@ def get_config_set_for_tuning(cls) -> Union[None, "SmoothQuantConfig", List["Smo
return SmoothQuantConfig(alpha=[0.1, 0.5])


# TODO(Yi) run `register_supported_configs` for all registered config.
SmoothQuantConfig.register_supported_configs()


def get_default_sq_config() -> SmoothQuantConfig:
"""Generate the default smoothquant config.
Expand Down Expand Up @@ -574,9 +563,6 @@ def get_config_set_for_tuning(cls) -> Union[None, "FP8QConfig", List["FP8QConfig
# TODO fwk owner needs to update it.
return FP8QConfig(act_dtype=[torch.float8_e4m3fn])

# TODO(Yi) run `register_supported_configs` for all registered config.
FP8QConfig.register_supported_configs()

def get_default_fp8_qconfig() -> FP8QConfig:
"""Generate the default gptq config.
Expand All @@ -588,6 +574,9 @@ def get_default_fp8_qconfig() -> FP8QConfig:
##################### Algo Configs End ###################################


register_supported_configs_for_fwk(fwk_name=FRAMEWORK_NAME)


def get_all_registered_configs() -> Dict[str, BaseConfig]:
registered_configs = config_registry.get_all_configs()
return registered_configs.get(FRAMEWORK_NAME, {})
141 changes: 141 additions & 0 deletions test/3x/common/test_common.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
"""Tests for common components.
!!! Please do not import any framework-specific modules in this file. !!!
* Note, we may need to add some auto check mechanisms to ensure this.
These tests aim to assess the fundamental functionalities of common components and enhance code coverage.
All tests will be included for each framework CI.
* Note
The folder structure:
.
├── 3x
│ ├── common
│ ├── onnxrt
│ ├── tensorflow
│ └── torch
For each fwk CI:
onnxrt_included_folder:
├── 3x
│ ├── common
│ ├── onnxrt
tensorflow_included_folder:
├── 3x
│ ├── common
│ ├── tensorflow
torch_included_folder:
├── 3x
│ ├── common
│ ├── torch
"""

import unittest

from neural_compressor.common import Logger

logger = Logger().get_logger()

from typing import Any, Callable, List, Optional, Tuple, Union

from neural_compressor.common.base_config import BaseConfig, get_all_config_set_from_config_registry, register_config
from neural_compressor.common.utils import DEFAULT_WHITE_LIST, OP_NAME_OR_MODULE_TYPE

PRIORITY_FAKE_ALGO = 100
FAKE_CONFIG_NAME = "fake"
DEFAULT_WEIGHT_BITS = [4, 6]

FAKE_FRAMEWORK_NAME = "FAKE_FWK"


@register_config(framework_name=FAKE_FRAMEWORK_NAME, algo_name=FAKE_CONFIG_NAME, priority=PRIORITY_FAKE_ALGO)
class FakeAlgoConfig(BaseConfig):
    """Configuration for the fake (framework-agnostic, test-only) algorithm."""

    supported_configs: List = []
    params_list = [
        "weight_dtype",
        "weight_bits",
    ]
    name = FAKE_CONFIG_NAME

    def __init__(
        self,
        weight_dtype: str = "int",
        weight_bits: int = 4,
        white_list: Optional[List[OP_NAME_OR_MODULE_TYPE]] = DEFAULT_WHITE_LIST,
    ):
        """Initialize the fake config.

        Args:
            weight_dtype (str): Data type for weights, default is "int".
            weight_bits (int): Number of bits used to represent weights, default is 4.
        """
        super().__init__(white_list=white_list)
        self.weight_dtype = weight_dtype
        self.weight_bits = weight_bits
        self._post_init()

    def to_dict(self):
        """Serialize this config via the base-class implementation."""
        return super().to_dict()

    @classmethod
    def from_dict(cls, config_dict):
        """Build a config instance from a plain dict via the base-class implementation."""
        return super(FakeAlgoConfig, cls).from_dict(config_dict=config_dict)

    @classmethod
    def register_supported_configs(cls) -> List:
        """No-op for the fake algorithm; real configs populate `supported_configs` here."""
        pass

    @staticmethod
    def get_model_info(model: Any) -> List[Tuple[str, Callable]]:
        """No-op for the fake algorithm; real configs inspect the model here."""
        pass

    @classmethod
    def get_config_set_for_tuning(cls) -> Union[None, "FakeAlgoConfig", List["FakeAlgoConfig"]]:
        """Return the config set explored during tuning."""
        return FakeAlgoConfig(weight_bits=DEFAULT_WEIGHT_BITS)


FakeAlgoConfig.register_supported_configs()


def get_default_fake_config() -> FakeAlgoConfig:
    """Build a FakeAlgoConfig with all default settings.

    Returns:
        the default fake config.
    """
    default_config = FakeAlgoConfig()
    return default_config


def get_all_config_set() -> Union[BaseConfig, List[BaseConfig]]:
    """Collect the tuning config set registered under the fake framework name."""
    config_set = get_all_config_set_from_config_registry(fwk_name=FAKE_FRAMEWORK_NAME)
    return config_set


class TestBaseConfig(unittest.TestCase):
    """Sanity checks for the common config registration/tuning APIs."""

    @classmethod
    def setUpClass(cls):
        # Conventionally named `cls` for @classmethod (was `self`); no shared fixtures needed.
        pass

    @classmethod
    def tearDownClass(cls):
        # Conventionally named `cls` for @classmethod (was `self`); nothing to clean up.
        pass

    def setUp(self):
        # Log the test name so CI output shows which case is running.
        # Lazy %-formatting defers string building to the logging framework.
        logger.info("Running TestBaseConfig test: %s", self.id())

    def test_api(self):
        """Default config values and the registered tuning config set are as expected."""
        fake_default_config = get_default_fake_config()
        self.assertEqual(fake_default_config.weight_dtype, "int")
        config_set = get_all_config_set()
        self.assertEqual(len(config_set), 1)
        self.assertEqual(config_set[0].weight_bits, DEFAULT_WEIGHT_BITS)


if __name__ == "__main__":
unittest.main()

0 comments on commit 9a549c3

Please sign in to comment.