From 67b1711c3e1f23d070321b44ca3e1c87cae72da3 Mon Sep 17 00:00:00 2001 From: WeiweiZhang1 Date: Fri, 18 Oct 2024 09:40:52 +0000 Subject: [PATCH] deploy: 795aeb5bb6950a357da5e339ed97277bc73b4c5c --- .../torch/utils/utility/index.rst.txt | 129 ++++++++++++ .../adaptor/mxnet_utils/index.html | 2 +- .../adaptor/mxnet_utils/util/index.html | 2 +- .../adaptor/ox_utils/calibration/index.html | 2 +- .../adaptor/ox_utils/calibrator/index.html | 2 +- .../adaptor/ox_utils/index.html | 2 +- .../ox_utils/operators/activation/index.html | 2 +- .../ox_utils/operators/argmax/index.html | 2 +- .../ox_utils/operators/attention/index.html | 2 +- .../ox_utils/operators/binary_op/index.html | 2 +- .../ox_utils/operators/concat/index.html | 2 +- .../ox_utils/operators/conv/index.html | 2 +- .../ox_utils/operators/direct_q8/index.html | 2 +- .../operators/embed_layernorm/index.html | 2 +- .../ox_utils/operators/gather/index.html | 2 +- .../ox_utils/operators/gavgpool/index.html | 2 +- .../ox_utils/operators/gemm/index.html | 2 +- .../adaptor/ox_utils/operators/index.html | 2 +- .../ox_utils/operators/lstm/index.html | 2 +- .../ox_utils/operators/matmul/index.html | 2 +- .../ox_utils/operators/maxpool/index.html | 2 +- .../ox_utils/operators/norm/index.html | 2 +- .../adaptor/ox_utils/operators/ops/index.html | 2 +- .../adaptor/ox_utils/operators/pad/index.html | 2 +- .../ox_utils/operators/pooling/index.html | 2 +- .../ox_utils/operators/reduce/index.html | 2 +- .../ox_utils/operators/resize/index.html | 2 +- .../ox_utils/operators/split/index.html | 2 +- .../ox_utils/operators/unary_op/index.html | 2 +- .../adaptor/ox_utils/quantizer/index.html | 2 +- .../adaptor/ox_utils/smooth_quant/index.html | 2 +- .../adaptor/ox_utils/util/index.html | 2 +- .../adaptor/ox_utils/weight_only/index.html | 2 +- .../adaptor/tensorflow/index.html | 2 +- .../tf_utils/graph_converter/index.html | 2 +- .../graph_converter_without_calib/index.html | 2 +- .../bf16/bf16_convert/index.html | 2 +- .../bf16/dequantize_cast_optimizer/index.html | 2 +- .../tf_utils/graph_rewriter/bf16/index.html | 2 +- .../generic/convert_add_to_biasadd/index.html | 2 +- .../generic/convert_layout/index.html | 2 +- .../generic/convert_leakyrelu/index.html | 2 +- .../generic/convert_nan_to_random/index.html | 2 +- .../convert_placeholder_to_const/index.html | 2 +- .../generic/dilated_contraction/index.html | 2 +- .../generic/dummy_biasadd/index.html | 2 +- .../generic/expanddims_optimizer/index.html | 2 +- .../fetch_weight_from_reshape/index.html | 2 +- .../generic/fold_batch_norm/index.html | 2 +- .../generic/fold_constant/index.html | 2 +- .../generic/fuse_biasadd_add/index.html | 2 +- .../generic/fuse_column_wise_mul/index.html | 2 +- .../generic/fuse_conv_with_math/index.html | 2 +- .../generic/fuse_decomposed_bn/index.html | 2 +- .../generic/fuse_decomposed_in/index.html | 2 +- .../generic/fuse_gelu/index.html | 2 +- .../generic/fuse_layer_norm/index.html | 2 +- .../generic/fuse_pad_with_conv/index.html | 2 +- .../fuse_pad_with_fp32_conv/index.html | 2 +- .../generic/fuse_reshape_transpose/index.html | 2 +- .../generic/graph_cse_optimizer/index.html | 2 +- .../generic/grappler_pass/index.html | 2 +- .../graph_rewriter/generic/index.html | 2 +- .../generic/insert_print_node/index.html | 2 +- .../move_squeeze_after_relu/index.html | 2 +- .../generic/pre_optimize/index.html | 2 +- .../generic/remove_training_nodes/index.html | 2 +- .../generic/rename_batch_norm/index.html | 2 +- .../generic/split_shared_input/index.html | 2 +- 
.../generic/strip_equivalent_nodes/index.html | 2 +- .../generic/strip_unused_nodes/index.html | 2 +- .../generic/switch_optimizer/index.html | 2 +- .../graph_rewriter/graph_base/index.html | 2 +- .../tf_utils/graph_rewriter/index.html | 2 +- .../int8/freeze_fake_quant/index.html | 2 +- .../int8/freeze_value/index.html | 2 +- .../freeze_value_without_calib/index.html | 2 +- .../fuse_conv_redundant_dequantize/index.html | 2 +- .../int8/fuse_conv_requantize/index.html | 2 +- .../index.html | 2 +- .../int8/fuse_matmul_requantize/index.html | 2 +- .../tf_utils/graph_rewriter/int8/index.html | 2 +- .../int8/meta_op_optimizer/index.html | 2 +- .../int8/post_hostconst_converter/index.html | 2 +- .../int8/post_quantized_op_cse/index.html | 2 +- .../int8/rnn_convert/index.html | 2 +- .../int8/scale_propagation/index.html | 2 +- .../tf_utils/graph_rewriter/onnx/index.html | 2 +- .../graph_rewriter/onnx/onnx_graph/index.html | 2 +- .../graph_rewriter/onnx/onnx_node/index.html | 2 +- .../onnx/onnx_schema/index.html | 2 +- .../onnx/tf2onnx_utils/index.html | 2 +- .../tf_utils/graph_rewriter/qdq/index.html | 2 +- .../qdq/insert_qdq_pattern/index.html | 2 +- .../qdq/merge_duplicated_qdq/index.html | 2 +- .../qdq/share_qdq_y_pattern/index.html | 2 +- .../adaptor/tf_utils/graph_util/index.html | 2 +- .../adaptor/tf_utils/index.html | 2 +- .../tf_utils/quantize_graph/index.html | 2 +- .../qat/fake_quantize/index.html | 2 +- .../tf_utils/quantize_graph/qat/index.html | 2 +- .../qat/quantize_config/index.html | 2 +- .../qat/quantize_helper/index.html | 2 +- .../qat/quantize_layers/index.html | 2 +- .../quantize_layers/optimize_layer/index.html | 2 +- .../quantize_layer_add/index.html | 2 +- .../quantize_layer_base/index.html | 2 +- .../quantize_layer_bn/index.html | 2 +- .../qat/quantize_wrapper/index.html | 2 +- .../quantize_graph/qdq/fuse_qdq_bn/index.html | 2 +- .../qdq/fuse_qdq_concatv2/index.html | 2 +- .../qdq/fuse_qdq_conv/index.html | 2 +- .../qdq/fuse_qdq_deconv/index.html | 2 +- .../quantize_graph/qdq/fuse_qdq_in/index.html | 2 +- .../qdq/fuse_qdq_matmul/index.html | 2 +- .../qdq/fuse_qdq_pooling/index.html | 2 +- .../tf_utils/quantize_graph/qdq/index.html | 2 +- .../qdq/optimize_qdq/index.html | 2 +- .../quantize_graph_base/index.html | 2 +- .../quantize_graph_bn/index.html | 2 +- .../quantize_graph_concatv2/index.html | 2 +- .../quantize_graph_conv/index.html | 2 +- .../quantize_graph_for_intel_cpu/index.html | 2 +- .../quantize_graph_matmul/index.html | 2 +- .../quantize_graph_pooling/index.html | 2 +- .../tf_utils/quantize_graph_common/index.html | 2 +- .../smooth_quant_calibration/index.html | 2 +- .../tf_utils/smooth_quant_scaler/index.html | 2 +- .../tf_utils/tf2onnx_converter/index.html | 2 +- .../bias_correction/index.html | 2 +- .../graph_transform_base/index.html | 2 +- .../tf_utils/transform_graph/index.html | 2 +- .../transform_graph/insert_logging/index.html | 2 +- .../rerange_quantized_concat/index.html | 2 +- .../adaptor/tf_utils/util/index.html | 2 +- .../torch_utils/bf16_convert/index.html | 2 +- .../torch_utils/hawq_metric/index.html | 2 +- .../adaptor/torch_utils/index.html | 2 +- .../torch_utils/layer_wise_quant/index.html | 2 +- .../modified_pickle/index.html | 2 +- .../layer_wise_quant/quantize/index.html | 2 +- .../layer_wise_quant/torch_load/index.html | 2 +- .../layer_wise_quant/utils/index.html | 2 +- .../torch_utils/model_wrapper/index.html | 2 +- .../torch_utils/pattern_detector/index.html | 2 +- .../torch_utils/symbolic_trace/index.html | 2 +- 
.../adaptor/torch_utils/util/index.html | 2 +- .../algorithm/algorithm/index.html | 2 +- .../algorithm/fast_bias_correction/index.html | 2 +- .../neural_compressor/algorithm/index.html | 2 +- .../algorithm/smooth_quant/index.html | 2 +- .../algorithm/weight_correction/index.html | 2 +- .../neural_compressor/benchmark/index.html | 2 +- .../common/base_config/index.html | 2 +- .../common/base_tuning/index.html | 2 +- .../common/benchmark/index.html | 2 +- .../neural_compressor/common/index.html | 2 +- .../common/tuning_param/index.html | 2 +- .../common/utils/constants/index.html | 2 +- .../neural_compressor/common/utils/index.html | 2 +- .../common/utils/logger/index.html | 2 +- .../common/utils/save_load/index.html | 2 +- .../common/utils/utility/index.html | 2 +- .../compression/callbacks/index.html | 2 +- .../distillation/criterions/index.html | 2 +- .../compression/distillation/index.html | 2 +- .../distillation/optimizers/index.html | 2 +- .../distillation/utility/index.html | 2 +- .../compression/hpo/index.html | 2 +- .../compression/hpo/sa_optimizer/index.html | 2 +- .../compression/pruner/criteria/index.html | 2 +- .../compression/pruner/index.html | 2 +- .../pruner/model_slim/auto_slim/index.html | 2 +- .../compression/pruner/model_slim/index.html | 2 +- .../model_slim/pattern_analyzer/index.html | 2 +- .../pruner/model_slim/weight_slim/index.html | 2 +- .../pruner/patterns/base/index.html | 2 +- .../compression/pruner/patterns/index.html | 2 +- .../pruner/patterns/mha/index.html | 2 +- .../pruner/patterns/ninm/index.html | 2 +- .../pruner/patterns/nxm/index.html | 2 +- .../pruner/pruners/base/index.html | 2 +- .../pruner/pruners/basic/index.html | 2 +- .../pruner/pruners/block_mask/index.html | 2 +- .../compression/pruner/pruners/index.html | 2 +- .../compression/pruner/pruners/mha/index.html | 2 +- .../pruner/pruners/pattern_lock/index.html | 2 +- .../pruner/pruners/progressive/index.html | 2 +- .../pruner/pruners/retrain_free/index.html | 2 +- .../compression/pruner/pruning/index.html | 2 +- .../compression/pruner/regs/index.html | 2 +- .../compression/pruner/schedulers/index.html | 2 +- .../compression/pruner/tf_criteria/index.html | 2 +- .../compression/pruner/utils/index.html | 2 +- .../compression/pruner/wanda/index.html | 2 +- .../compression/pruner/wanda/utils/index.html | 2 +- .../neural_compressor/config/index.html | 2 +- .../neural_compressor/contrib/index.html | 2 +- .../contrib/strategy/index.html | 2 +- .../contrib/strategy/sigopt/index.html | 2 +- .../contrib/strategy/tpe/index.html | 2 +- .../dataloaders/base_dataloader/index.html | 2 +- .../data/dataloaders/dataloader/index.html | 2 +- .../dataloaders/default_dataloader/index.html | 2 +- .../data/dataloaders/fetcher/index.html | 2 +- .../dataloaders/mxnet_dataloader/index.html | 2 +- .../dataloaders/onnxrt_dataloader/index.html | 2 +- .../dataloaders/pytorch_dataloader/index.html | 2 +- .../data/dataloaders/sampler/index.html | 2 +- .../tensorflow_dataloader/index.html | 2 +- .../data/datasets/bert_dataset/index.html | 2 +- .../data/datasets/coco_dataset/index.html | 2 +- .../data/datasets/dataset/index.html | 2 +- .../data/datasets/dummy_dataset/index.html | 2 +- .../data/datasets/dummy_dataset_v2/index.html | 2 +- .../data/datasets/imagenet_dataset/index.html | 2 +- .../data/datasets/index.html | 2 +- .../style_transfer_dataset/index.html | 2 +- .../data/filters/coco_filter/index.html | 2 +- .../data/filters/filter/index.html | 2 +- .../neural_compressor/data/filters/index.html | 2 +- 
.../autoapi/neural_compressor/data/index.html | 2 +- .../transforms/imagenet_transform/index.html | 2 +- .../data/transforms/index.html | 2 +- .../data/transforms/postprocess/index.html | 2 +- .../data/transforms/tokenization/index.html | 2 +- .../data/transforms/transform/index.html | 2 +- latest/autoapi/neural_compressor/index.html | 2 +- .../neural_compressor/metric/bleu/index.html | 2 +- .../metric/bleu_util/index.html | 2 +- .../metric/coco_label_map/index.html | 2 +- .../metric/coco_tools/index.html | 2 +- .../metric/evaluate_squad/index.html | 2 +- .../neural_compressor/metric/f1/index.html | 2 +- .../neural_compressor/metric/index.html | 2 +- .../metric/metric/index.html | 2 +- .../mix_precision/index.html | 2 +- .../model/base_model/index.html | 2 +- .../neural_compressor/model/index.html | 2 +- .../model/keras_model/index.html | 2 +- .../neural_compressor/model/model/index.html | 2 +- .../model/mxnet_model/index.html | 2 +- .../model/nets_factory/index.html | 2 +- .../model/onnx_model/index.html | 2 +- .../model/tensorflow_model/index.html | 2 +- .../model/torch_model/index.html | 2 +- .../neural_compressor/objective/index.html | 2 +- .../neural_compressor/profiling/index.html | 2 +- .../profiling/parser/factory/index.html | 2 +- .../parser/onnx_parser/factory/index.html | 2 +- .../parser/onnx_parser/parser/index.html | 2 +- .../profiling/parser/parser/index.html | 2 +- .../profiling/parser/result/index.html | 2 +- .../tensorflow_parser/factory/index.html | 2 +- .../tensorflow_parser/parser/index.html | 2 +- .../profiling/profiler/factory/index.html | 2 +- .../onnxrt_profiler/factory/index.html | 2 +- .../onnxrt_profiler/profiler/index.html | 2 +- .../profiler/onnxrt_profiler/utils/index.html | 2 +- .../profiling/profiler/profiler/index.html | 2 +- .../tensorflow_profiler/factory/index.html | 2 +- .../tensorflow_profiler/profiler/index.html | 2 +- .../tensorflow_profiler/utils/index.html | 2 +- .../neural_compressor/quantization/index.html | 2 +- .../strategy/auto/index.html | 2 +- .../strategy/auto_mixed_precision/index.html | 2 +- .../strategy/basic/index.html | 2 +- .../strategy/bayesian/index.html | 2 +- .../strategy/conservative/index.html | 2 +- .../strategy/exhaustive/index.html | 2 +- .../strategy/hawq_v2/index.html | 2 +- .../neural_compressor/strategy/index.html | 2 +- .../neural_compressor/strategy/mse/index.html | 2 +- .../strategy/mse_v2/index.html | 2 +- .../strategy/random/index.html | 2 +- .../strategy/strategy/index.html | 2 +- .../strategy/utils/constant/index.html | 2 +- .../strategy/utils/index.html | 2 +- .../strategy/utils/tuning_sampler/index.html | 2 +- .../strategy/utils/tuning_space/index.html | 2 +- .../strategy/utils/tuning_structs/index.html | 2 +- .../strategy/utils/utility/index.html | 2 +- .../template/api_doc_example/index.html | 2 +- .../tensorflow/algorithms/index.html | 2 +- .../smoother/calibration/index.html | 2 +- .../algorithms/smoother/core/index.html | 2 +- .../tensorflow/algorithms/smoother/index.html | 2 +- .../algorithms/smoother/scaler/index.html | 2 +- .../algorithms/static_quant/index.html | 2 +- .../algorithms/static_quant/keras/index.html | 2 +- .../static_quant/tensorflow/index.html | 2 +- .../neural_compressor/tensorflow/index.html | 2 +- .../tensorflow/keras/index.html | 2 +- .../tensorflow/keras/layers/conv2d/index.html | 2 +- .../tensorflow/keras/layers/dense/index.html | 2 +- .../keras/layers/depthwise_conv2d/index.html | 2 +- .../tensorflow/keras/layers/index.html | 2 +- .../keras/layers/layer_initializer/index.html | 2 +- 
.../tensorflow/keras/layers/pool2d/index.html | 2 +- .../keras/layers/separable_conv2d/index.html | 2 +- .../keras/quantization/config/index.html | 2 +- .../tensorflow/keras/quantization/index.html | 2 +- .../quantization/algorithm_entry/index.html | 2 +- .../quantization/autotune/index.html | 2 +- .../tensorflow/quantization/config/index.html | 2 +- .../tensorflow/quantization/index.html | 2 +- .../quantization/quantize/index.html | 2 +- .../utils/graph_converter/index.html | 2 +- .../bf16/bf16_convert/index.html | 2 +- .../bf16/dequantize_cast_optimizer/index.html | 2 +- .../utils/graph_rewriter/bf16/index.html | 2 +- .../generic/convert_add_to_biasadd/index.html | 2 +- .../generic/convert_layout/index.html | 2 +- .../generic/convert_leakyrelu/index.html | 2 +- .../generic/convert_nan_to_random/index.html | 2 +- .../convert_placeholder_to_const/index.html | 2 +- .../generic/dilated_contraction/index.html | 2 +- .../generic/dummy_biasadd/index.html | 2 +- .../generic/expanddims_optimizer/index.html | 2 +- .../fetch_weight_from_reshape/index.html | 2 +- .../generic/fold_batch_norm/index.html | 2 +- .../generic/fold_constant/index.html | 2 +- .../generic/fuse_biasadd_add/index.html | 2 +- .../generic/fuse_column_wise_mul/index.html | 2 +- .../generic/fuse_conv_with_math/index.html | 2 +- .../generic/fuse_decomposed_bn/index.html | 2 +- .../generic/fuse_decomposed_in/index.html | 2 +- .../generic/fuse_gelu/index.html | 2 +- .../generic/fuse_layer_norm/index.html | 2 +- .../generic/fuse_pad_with_conv/index.html | 2 +- .../fuse_pad_with_fp32_conv/index.html | 2 +- .../generic/fuse_reshape_transpose/index.html | 2 +- .../generic/graph_cse_optimizer/index.html | 2 +- .../generic/grappler_pass/index.html | 2 +- .../utils/graph_rewriter/generic/index.html | 2 +- .../generic/insert_print_node/index.html | 2 +- .../move_squeeze_after_relu/index.html | 2 +- .../generic/pre_optimize/index.html | 2 +- .../generic/remove_training_nodes/index.html | 2 +- .../generic/rename_batch_norm/index.html | 2 +- .../generic/split_shared_input/index.html | 2 +- .../generic/strip_equivalent_nodes/index.html | 2 +- .../generic/strip_unused_nodes/index.html | 2 +- .../generic/switch_optimizer/index.html | 2 +- .../graph_rewriter/graph_base/index.html | 2 +- .../utils/graph_rewriter/index.html | 2 +- .../int8/freeze_fake_quant/index.html | 2 +- .../int8/freeze_value/index.html | 2 +- .../fuse_conv_redundant_dequantize/index.html | 2 +- .../int8/fuse_conv_requantize/index.html | 2 +- .../index.html | 2 +- .../int8/fuse_matmul_requantize/index.html | 2 +- .../utils/graph_rewriter/int8/index.html | 2 +- .../int8/meta_op_optimizer/index.html | 2 +- .../int8/post_hostconst_converter/index.html | 2 +- .../int8/post_quantized_op_cse/index.html | 2 +- .../int8/scale_propagation/index.html | 2 +- .../utils/graph_rewriter/qdq/index.html | 2 +- .../qdq/insert_qdq_pattern/index.html | 2 +- .../qdq/merge_duplicated_qdq/index.html | 2 +- .../qdq/share_qdq_y_pattern/index.html | 2 +- .../quantization/utils/graph_util/index.html | 2 +- .../tensorflow/quantization/utils/index.html | 2 +- .../utils/quantize_graph/index.html | 2 +- .../quantize_graph/qdq/fuse_qdq_bn/index.html | 2 +- .../qdq/fuse_qdq_concatv2/index.html | 2 +- .../qdq/fuse_qdq_conv/index.html | 2 +- .../qdq/fuse_qdq_deconv/index.html | 2 +- .../quantize_graph/qdq/fuse_qdq_in/index.html | 2 +- .../qdq/fuse_qdq_matmul/index.html | 2 +- .../qdq/fuse_qdq_pooling/index.html | 2 +- .../utils/quantize_graph/qdq/index.html | 2 +- .../qdq/optimize_qdq/index.html | 2 +- 
.../quantize_graph_base/index.html | 2 +- .../quantize_graph_bn/index.html | 2 +- .../quantize_graph_concatv2/index.html | 2 +- .../quantize_graph_conv/index.html | 2 +- .../quantize_graph_for_intel_cpu/index.html | 2 +- .../quantize_graph_matmul/index.html | 2 +- .../quantize_graph_pooling/index.html | 2 +- .../utils/quantize_graph_common/index.html | 2 +- .../bias_correction/index.html | 2 +- .../graph_transform_base/index.html | 2 +- .../utils/transform_graph/index.html | 2 +- .../transform_graph/insert_logging/index.html | 2 +- .../rerange_quantized_concat/index.html | 2 +- .../quantization/utils/utility/index.html | 2 +- .../tensorflow/utils/constants/index.html | 2 +- .../tensorflow/utils/data/index.html | 2 +- .../tensorflow/utils/index.html | 2 +- .../tensorflow/utils/model/index.html | 2 +- .../utils/model_wrappers/index.html | 2 +- .../tensorflow/utils/utility/index.html | 2 +- .../algorithms/base_algorithm/index.html | 2 +- .../fp8_quant/utils/logger/index.html | 2 +- .../torch/algorithms/index.html | 2 +- .../torch/algorithms/layer_wise/index.html | 2 +- .../algorithms/layer_wise/load/index.html | 2 +- .../layer_wise/modified_pickle/index.html | 2 +- .../algorithms/layer_wise/utils/index.html | 2 +- .../half_precision_convert/index.html | 2 +- .../algorithms/mixed_precision/index.html | 2 +- .../module_wrappers/index.html | 2 +- .../torch/algorithms/mx_quant/index.html | 2 +- .../torch/algorithms/mx_quant/mx/index.html | 2 +- .../algorithms/mx_quant/utils/index.html | 2 +- .../algorithms/pt2e_quant/core/index.html | 2 +- .../half_precision_rewriter/index.html | 2 +- .../torch/algorithms/pt2e_quant/index.html | 2 +- .../pt2e_quant/save_load/index.html | 2 +- .../algorithms/pt2e_quant/utility/index.html | 2 +- .../torch/algorithms/smooth_quant/index.html | 2 +- .../smooth_quant/save_load/index.html | 2 +- .../smooth_quant/smooth_quant/index.html | 2 +- .../smooth_quant/utility/index.html | 2 +- .../torch/algorithms/static_quant/index.html | 2 +- .../static_quant/save_load/index.html | 2 +- .../static_quant/static_quant/index.html | 2 +- .../static_quant/utility/index.html | 2 +- .../weight_only/autoround/index.html | 2 +- .../algorithms/weight_only/awq/index.html | 2 +- .../algorithms/weight_only/gptq/index.html | 2 +- .../weight_only/hqq/bitpack/index.html | 2 +- .../weight_only/hqq/config/index.html | 2 +- .../weight_only/hqq/core/index.html | 2 +- .../algorithms/weight_only/hqq/index.html | 2 +- .../weight_only/hqq/optimizer/index.html | 2 +- .../weight_only/hqq/qtensor/index.html | 2 +- .../weight_only/hqq/quantizer/index.html | 2 +- .../torch/algorithms/weight_only/index.html | 2 +- .../algorithms/weight_only/modules/index.html | 2 +- .../algorithms/weight_only/rtn/index.html | 2 +- .../weight_only/save_load/index.html | 2 +- .../algorithms/weight_only/teq/index.html | 2 +- .../algorithms/weight_only/utility/index.html | 2 +- .../neural_compressor/torch/export/index.html | 2 +- .../torch/export/pt2e_export/index.html | 2 +- .../neural_compressor/torch/index.html | 2 +- .../quantization/algorithm_entry/index.html | 2 +- .../torch/quantization/autotune/index.html | 2 +- .../torch/quantization/config/index.html | 2 +- .../torch/quantization/index.html | 2 +- .../torch/quantization/load_entry/index.html | 2 +- .../torch/quantization/quantize/index.html | 2 +- .../torch/utils/auto_accelerator/index.html | 2 +- .../torch/utils/bit_packer/index.html | 2 +- .../torch/utils/constants/index.html | 2 +- .../torch/utils/environ/index.html | 2 +- 
.../neural_compressor/torch/utils/index.html | 2 +- .../torch/utils/utility/index.html | 199 ++++++++++++++++-- .../neural_compressor/training/index.html | 2 +- .../quantization/utils/index.html | 2 +- .../transformers/utils/index.html | 2 +- .../utils/quantization_config/index.html | 2 +- .../utils/collect_layer_histogram/index.html | 2 +- .../utils/constant/index.html | 2 +- .../utils/create_obj_from_config/index.html | 2 +- .../neural_compressor/utils/export/index.html | 2 +- .../utils/export/qlinear2qdq/index.html | 2 +- .../utils/export/tf2onnx/index.html | 2 +- .../utils/export/torch2onnx/index.html | 2 +- .../neural_compressor/utils/index.html | 2 +- .../utils/kl_divergence/index.html | 2 +- .../utils/load_huggingface/index.html | 2 +- .../neural_compressor/utils/logger/index.html | 2 +- .../utils/options/index.html | 2 +- .../utils/pytorch/index.html | 2 +- .../utils/utility/index.html | 2 +- .../utils/weights_details/index.html | 2 +- .../neural_compressor/version/index.html | 2 +- latest/docs/build_docs/source/index.html | 2 +- latest/docs/source/2x_user_guide.html | 2 +- latest/docs/source/3x/PT_DynamicQuant.html | 2 +- latest/docs/source/3x/PT_FP8Quant.html | 2 +- latest/docs/source/3x/PT_MXQuant.html | 2 +- latest/docs/source/3x/PT_MixedPrecision.html | 2 +- latest/docs/source/3x/PT_SmoothQuant.html | 2 +- latest/docs/source/3x/PT_StaticQuant.html | 2 +- latest/docs/source/3x/PT_WeightOnlyQuant.html | 2 +- latest/docs/source/3x/PyTorch.html | 2 +- latest/docs/source/3x/TF_Quant.html | 2 +- latest/docs/source/3x/TF_SQ.html | 2 +- latest/docs/source/3x/TensorFlow.html | 2 +- latest/docs/source/3x/autotune.html | 2 +- latest/docs/source/3x/benchmark.html | 2 +- latest/docs/source/3x/client_quant.html | 2 +- latest/docs/source/3x/design.html | 2 +- latest/docs/source/3x/gaudi_version_map.html | 2 +- latest/docs/source/3x/llm_recipes.html | 2 +- latest/docs/source/3x/quantization.html | 2 +- .../docs/source/3x/transformers_like_api.html | 2 +- latest/docs/source/CODE_OF_CONDUCT.html | 2 +- latest/docs/source/CONTRIBUTING.html | 2 +- latest/docs/source/FX.html | 2 +- latest/docs/source/SECURITY.html | 2 +- latest/docs/source/Welcome.html | 2 +- latest/docs/source/adaptor.html | 2 +- latest/docs/source/add_new_adaptor.html | 2 +- latest/docs/source/add_new_data_type.html | 2 +- latest/docs/source/api-doc/adaptor.html | 2 +- .../docs/source/api-doc/adaptor/onnxrt.html | 2 +- .../source/api-doc/adaptor/torch_utils.html | 2 +- latest/docs/source/api-doc/api_2.html | 2 +- latest/docs/source/api-doc/api_3.html | 2 +- .../docs/source/api-doc/api_doc_example.html | 2 +- latest/docs/source/api-doc/apis.html | 2 +- latest/docs/source/api-doc/benchmark.html | 2 +- latest/docs/source/api-doc/compression.html | 2 +- latest/docs/source/api-doc/config.html | 2 +- latest/docs/source/api-doc/mix_precision.html | 2 +- latest/docs/source/api-doc/model.html | 2 +- latest/docs/source/api-doc/objective.html | 2 +- latest/docs/source/api-doc/quantization.html | 2 +- latest/docs/source/api-doc/strategy.html | 2 +- .../api-doc/tf_quantization_autotune.html | 2 +- .../api-doc/tf_quantization_common.html | 2 +- .../api-doc/tf_quantization_config.html | 2 +- .../api-doc/torch_quantization_autotune.html | 2 +- .../api-doc/torch_quantization_common.html | 2 +- .../api-doc/torch_quantization_config.html | 2 +- latest/docs/source/api-doc/training.html | 2 +- latest/docs/source/benchmark.html | 2 +- latest/docs/source/calibration.html | 2 +- latest/docs/source/coding_style.html | 2 +- 
latest/docs/source/dataloader.html | 2 +- latest/docs/source/design.html | 2 +- .../source/distillation_quantization.html | 2 +- latest/docs/source/distributed.html | 2 +- latest/docs/source/examples_readme.html | 2 +- latest/docs/source/export.html | 2 +- latest/docs/source/faq.html | 2 +- latest/docs/source/framework_yaml.html | 2 +- latest/docs/source/get_started.html | 2 +- latest/docs/source/incompatible_changes.html | 2 +- latest/docs/source/infrastructure.html | 2 +- latest/docs/source/installation_guide.html | 2 +- latest/docs/source/legal_information.html | 2 +- latest/docs/source/llm_recipes.html | 2 +- latest/docs/source/metric.html | 2 +- latest/docs/source/migration.html | 2 +- latest/docs/source/mixed_precision.html | 2 +- latest/docs/source/model.html | 2 +- latest/docs/source/mx_quantization.html | 2 +- latest/docs/source/objective.html | 2 +- latest/docs/source/orchestration.html | 2 +- latest/docs/source/pruning.html | 2 +- latest/docs/source/publication_list.html | 2 +- latest/docs/source/quantization.html | 2 +- .../docs/source/quantization_layer_wise.html | 2 +- .../source/quantization_mixed_precision.html | 2 +- .../docs/source/quantization_weight_only.html | 2 +- latest/docs/source/releases_info.html | 2 +- latest/docs/source/sigopt_strategy.html | 2 +- latest/docs/source/smooth_quant.html | 2 +- latest/docs/source/transform.html | 2 +- latest/docs/source/tuning_strategies.html | 2 +- latest/docs/source/validated_model_list.html | 2 +- latest/genindex.html | 34 ++- latest/objects.inv | Bin 30524 -> 30612 bytes latest/py-modindex.html | 2 +- latest/search.html | 2 +- latest/searchindex.js | 2 +-
 562 files changed, 900 insertions(+), 578 deletions(-)

diff --git a/latest/_sources/autoapi/neural_compressor/torch/utils/utility/index.rst.txt b/latest/_sources/autoapi/neural_compressor/torch/utils/utility/index.rst.txt
index f7707cc39ad..8e2b420ed23 100644
--- a/latest/_sources/autoapi/neural_compressor/torch/utils/utility/index.rst.txt
+++ b/latest/_sources/autoapi/neural_compressor/torch/utils/utility/index.rst.txt
@@ -14,6 +14,7 @@ Functions
 
 .. autoapisummary::
 
+   neural_compressor.torch.utils.utility.is_optimum_habana_available
    neural_compressor.torch.utils.utility.register_algo
    neural_compressor.torch.utils.utility.fetch_module
    neural_compressor.torch.utils.utility.set_module
@@ -26,11 +27,32 @@ Functions
    neural_compressor.torch.utils.utility.get_processor_type_from_user_config
    neural_compressor.torch.utils.utility.dowload_hf_model
    neural_compressor.torch.utils.utility.load_empty_model
+   neural_compressor.torch.utils.utility.get_module
+   neural_compressor.torch.utils.utility.get_layer_names_in_block
+   neural_compressor.torch.utils.utility.to_dtype
+   neural_compressor.torch.utils.utility.to_device
+   neural_compressor.torch.utils.utility.get_block_names
+   neural_compressor.torch.utils.utility.validate_modules
+   neural_compressor.torch.utils.utility.get_multimodal_block_names
+   neural_compressor.torch.utils.utility.detect_device
+   neural_compressor.torch.utils.utility.run_fn_for_vlm_autoround
 
 
 Module Contents
 ---------------
 
+.. py:function:: is_optimum_habana_available()
+
+   Checks if the Optimum Habana module is available for use with the transformers library.
+
+   This function checks two conditions:
+   1. If the `optimum` package is available using `transformers.utils.import_utils.is_optimum_available`.
+   2. If the `optimum.habana` module can be found using `importlib.util.find_spec`.
+
+   :returns: True if Optimum Habana is available, False otherwise.
+   :rtype: bool
+
+
 .. py:function:: register_algo(name)
 
    Decorator function to register algorithms in the algos_mapping dictionary.
@@ -167,3 +189,110 @@ Module Contents
    Load a empty model.
 
 
+.. py:function:: get_module(module, key)
+
+   Get module from model by key name.
+
+   :param module: original model
+   :type module: torch.nn.Module
+   :param key: module name to be replaced
+   :type key: str
+
+
+.. py:function:: get_layer_names_in_block(model, supported_types=[torch.nn.Linear, transformers.modeling_utils.Conv1D], quant_block_list=None)
+
+   Retrieves the names of layers within each block of the model.
+
+   :returns: A list of strings, where each string is the name of a layer
+             within a block of the model.
+   :rtype: list
+
+
+.. py:function:: to_dtype(input, dtype=torch.float32)
+
+   Moves input data to the specified data type.
+
+   Args:
+       input: The input data to be moved.
+       dtype: The target data type.
+
+   Returns:
+       The input data on the specified data type.
+
+
+.. py:function:: to_device(input, device=torch.device('cpu'))
+
+   Moves input data to the specified device.
+
+   Args:
+       input: The input data to be moved.
+       device: The target device.
+
+   Returns:
+       The input data on the specified device.
+
+
+.. py:function:: get_block_names(model)
+
+   Get the block names for transformers-like networks.
+
+   Args:
+       model: The model.
+
+   Returns:
+       block_names: A list whose elements are list of block's layer names
+
+
+.. py:function:: validate_modules(module_names)
+
+   Test a list of modules' validity.
+
+   Args:
+       modules (list of str): List of strings to be validated.
+
+   Returns:
+       bool: True if all modules have equal length or not dependent, otherwise False.
+
+
+.. py:function:: get_multimodal_block_names(model, quant_vision=False)
+
+   Get the multimodal model block names for transformers-like networks.
+
+   Args:
+       model: The model.
+
+   Returns:
+       block_names: A list whose elements are list of block's layer names
+
+
+.. py:function:: detect_device(device=None)
+
+   Detects the device to use for model execution (GPU, HPU, or CPU).
+
+   :param device: - If a string ('cuda', 'cpu', or 'hpu') or torch.device is provided, that device is selected.
+                  - If an integer is provided, it treats it as a GPU device index.
+                  - If None or 'auto', it automatically selects 'cuda' if available, 'hpu' if Habana is available,
+                    or falls back to 'cpu'.
+   :type device: str, int, torch.device, optional
+
+   :returns: The selected device in string format ('cuda:X', 'hpu', or 'cpu').
+   :rtype: str
+
+
+.. py:function:: run_fn_for_vlm_autoround(model, dataloader, seqlen=512, nsamples=512)
+
+   Runs a model on a provided dataset with automatic device detection for vector-language models.
+
+   :param model: The model to run.
+   :param dataloader: A PyTorch dataloader providing the input data for the model.
+   :param seqlen: The minimum sequence length of input data to process. Defaults to 512.
+   :type seqlen: int, optional
+   :param nsamples: The number of samples to process before stopping. Defaults to 512.
+   :type nsamples: int, optional
+
+   :returns: None
+
+
diff --git a/latest/autoapi/neural_compressor/adaptor/mxnet_utils/index.html b/latest/autoapi/neural_compressor/adaptor/mxnet_utils/index.html
index b936ed6066f..a05779de50c 100644
--- a/latest/autoapi/neural_compressor/adaptor/mxnet_utils/index.html
+++ b/latest/autoapi/neural_compressor/adaptor/mxnet_utils/index.html
@@ -114,7 +114,7 @@
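A minimal usage sketch for the helpers documented in the index.rst.txt diff above, assuming they are importable from neural_compressor.torch.utils.utility with exactly the documented signatures and that register_algo is applied as a decorator; the algorithm name and the toy function below are hypothetical placeholders:

import torch

from neural_compressor.torch.utils.utility import (
    detect_device,
    is_optimum_habana_available,
    register_algo,
    to_device,
    to_dtype,
)

# Resolve the execution device ('cuda:X', 'hpu', or 'cpu') and move data onto it.
device = detect_device()                       # None/'auto' picks the best available backend
x = torch.randn(2, 8)
x = to_device(x, device=torch.device(device))  # move the tensor to the selected device
x = to_dtype(x, dtype=torch.bfloat16)          # dtype conversion follows the same pattern

# Guard Gaudi/HPU-specific code paths with the availability check.
if is_optimum_habana_available():
    print("optimum-habana detected; an HPU-specific flow could be enabled here")

# Register a toy algorithm under a name so the framework can look it up later
# (hypothetical name; register_algo is used as a decorator per its docstring).
@register_algo("toy_identity_algo")
def toy_identity_algo(model, *args, **kwargs):
    """Placeholder algorithm that simply returns the model unchanged."""
    return model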

diff --git a/latest/autoapi/neural_compressor/adaptor/mxnet_utils/util/index.html b/latest/autoapi/neural_compressor/adaptor/mxnet_utils/util/index.html index 098b2ab5809..71f67423a3a 100644 --- a/latest/autoapi/neural_compressor/adaptor/mxnet_utils/util/index.html +++ b/latest/autoapi/neural_compressor/adaptor/mxnet_utils/util/index.html @@ -678,7 +678,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/calibration/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/calibration/index.html index 1d0257d23cd..56c888f6a88 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/calibration/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/calibration/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/calibrator/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/calibrator/index.html index 0aadbfc0187..e2ba50e5c28 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/calibrator/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/calibrator/index.html @@ -225,7 +225,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/index.html index a844fc597dd..b05fa6f2fcb 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/index.html @@ -127,7 +127,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index.html index f9f2bab590e..d1efa5a0ad7 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index.html @@ -152,7 +152,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index.html index 91d1fbc2b15..2e97d684b80 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index.html index 0ea9fe91702..c7d5368851f 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index.html index e2c5d554162..d3cb448ea2a 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index.html @@ -152,7 +152,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index.html index ad47fb23c8f..4d314df71b2 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index.html index dafc91150b6..9cb30c8f328 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index.html index 4dbf6de24c5..cab3f673d22 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index.html index e4cea439088..2859d28d9ae 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index.html index e727002a832..cb8a1e374cb 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index.html index 74cadd514fa..3358a79e709 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index.html index 356e3eb9404..55635d9d185 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/index.html index 4c76a425dbf..8487a4a8975 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/index.html @@ -135,7 +135,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index.html index 8cb04e0781a..e5900ce8194 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index.html index e343b8172c2..6d9a74b5861 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index.html @@ -143,7 +143,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index.html index 38ce37fce58..23ac27462a1 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index.html index 40233113053..bfc5c64765e 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index.html index 381b92a7603..e01d8c6e8e4 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index.html @@ -159,7 +159,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index.html index 1f9f255c3f7..5fdb877ce8f 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index.html index 8f8c1e11500..c686a8a4520 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index.html index d1b37fdf46b..69e943f6a8e 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index.html index 3c6f80b918f..5439c312dc6 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/split/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/split/index.html index 18af51bbd08..46693999c4e 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/split/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/split/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index.html index f633262cba6..00841c45ff1 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/quantizer/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/quantizer/index.html index 9abd45e1a93..8c997adc4a2 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/quantizer/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/quantizer/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index.html index 4fc3b906643..5e2ed66c3a0 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index.html @@ -199,7 +199,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/util/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/util/index.html index f8e2eee8d65..1907eea97bf 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/util/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/util/index.html @@ -462,7 +462,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/ox_utils/weight_only/index.html b/latest/autoapi/neural_compressor/adaptor/ox_utils/weight_only/index.html index 5fe39e9a4dc..c538f9b5041 100644 --- a/latest/autoapi/neural_compressor/adaptor/ox_utils/weight_only/index.html +++ b/latest/autoapi/neural_compressor/adaptor/ox_utils/weight_only/index.html @@ -483,7 +483,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tensorflow/index.html b/latest/autoapi/neural_compressor/adaptor/tensorflow/index.html index b3f9e147911..cdfd4625640 100644 --- a/latest/autoapi/neural_compressor/adaptor/tensorflow/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tensorflow/index.html @@ -143,7 +143,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index.html index 5a76688d320..81e83e9f571 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index.html index 093618c3d8a..d9149d6e8d1 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index.html index 311b5b2b693..444d0a6089c 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html index 9df7fbe12b1..daec8b48f14 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index.html index f4bbb5bedc0..2c5d3a0b4f0 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index.html @@ -115,7 +115,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index.html index 56d8793c505..e15e221d1b2 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index.html index d40312210aa..b0a6d701d68 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index.html @@ -131,7 +131,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index.html index 7e080f8c8c5..e6cd816071f 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index.html index e3258853bec..a1cb14b27ca 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index.html index 3077640e53f..ff3c8226035 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index.html index 88ebc4b66de..e86d7d0bf60 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index.html index 7c9add617f9..66d129a1e05 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index.html index af11270ee28..5abc1361971 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html index 93c4fcecfe2..abc4ea7421a 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index.html index 57bca1178a2..475b2a79468 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index.html index f8e8da4f2f9..1c4c5bc509d 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index.html index 15b4adfbadd..173b3aa5b0a 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index.html index 19500768f02..0bb67ab9076 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index.html index 2012ba23c7a..e7a63f184c8 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index.html @@ -140,7 +140,7 @@

diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index.html index 2ff211d4542..baf3f92ef4f 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index.html @@ -219,7 +219,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index.html index 90009ab3340..2d55b1a38b8 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index.html @@ -219,7 +219,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index.html index 9843c0b3451..b5e727d72ff 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index.html index 350d3c0f6e3..8f9edf179f4 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index.html @@ -186,7 +186,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index.html index 258d85ca8f7..4389edae88e 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html index ceb2f61f5ff..4e58236d38a 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index.html index a6e3aababb3..e983f7203e4 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index.html index d7d26c47fa6..28303349662 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index.html @@ -181,7 +181,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index.html index 0096af992d0..294df649a0c 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index.html index 51c4b52dd80..e313386aab3 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index.html @@ -145,7 +145,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index.html index e211ea62d34..fc2cc384724 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index.html index 2a779cf5bc3..2114bdf4923 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index.html index 6f5845cd374..f63cb67a80f 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index.html index ba44b42faa8..a0773cd2745 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index.html index 70fb0411eaf..5f60acba7df 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index.html index 0d6ff0821c1..aab0f1e85bb 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index.html index 2a3f858c49c..00357646936 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index.html index f4b5b439f93..20dbcb1c11d 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index.html index 8f74c0f31e9..0dd311ff660 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index.html index af395f0a2c5..295fce76ffa 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index.html @@ -131,7 +131,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index.html index f32dd65470b..9e7a64395e6 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index.html @@ -126,7 +126,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index.html index d9b42f77a73..98592e19e97 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index.html index 39e004dd578..5433ab488ee 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index.html index 62423916469..50e69cf849b 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html index 789ab4e5ca1..2c5857be107 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index.html index a5c4cec21f4..31de5a13235 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html index 84808262a0f..1801bfa517a 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index.html index 1d1f5750396..3529fd06b91 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index.html @@ -152,7 +152,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index.html index d29ef53bfde..1eea4697504 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index.html index 9cab76f4ce8..e105370bf0b 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index.html @@ -126,7 +126,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index.html index 8818cf1e81b..4153bf084b5 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index.html index 11f21b5d4d7..05bfeb99ba0 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index.html index 72b8d631693..d60953cbce1 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index.html index ff501adf771..c7f79d7be49 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index.html index 13b06c84f0c..4a988c5766e 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index.html @@ -117,7 +117,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index.html index 806906b31b1..b5543d9ea6c 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index.html index ffd0a937221..91c3fc2e764 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index.html index 7f5ad67d2bb..0b663c39c49 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index.html @@ -150,7 +150,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index.html index 0d19a159354..3c5ce796af6 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index.html @@ -359,7 +359,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index.html index a2f0bd60a40..2c13abaf05c 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index.html @@ -116,7 +116,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index.html index 1cd728771e0..c7b925306f6 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html index a15cc83295a..8869b6e0209 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html index 4b1bb988784..6dc1b18ce68 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html @@ -126,7 +126,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_util/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_util/index.html index 975d42a725e..b3581f46786 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_util/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/graph_util/index.html @@ -137,7 +137,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/index.html index 9a0292e71d9..5e0b2c47f3d 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/index.html @@ -131,7 +131,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index.html index 423795fca49..4fad307aad1 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index.html @@ -129,7 +129,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index.html index 21b5548ac42..dacaeffb509 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index.html @@ -134,7 +134,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index.html index edf5494846c..68425136fce 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index.html index 4c20dce813a..9312f7a7716 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index.html @@ -126,7 +126,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index.html index c2434621789..f6f6642f0f0 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index.html @@ -163,7 +163,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index.html index beb72148065..4e6b5bf5632 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index.html @@ -117,7 +117,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index.html index 338a4e5f8a7..34278838914 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index.html index a63bb6081ed..e21ee13355a 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index.html index 84231fcdfed..96166342521 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index.html index 95a12917110..81f0964a0a0 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index.html index be9ab86b237..08ebe7ac427 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index.html @@ -135,7 +135,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index.html index c5b36c9be1a..5d8d33c0770 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html index f21c3486f30..2d9c1f37745 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index.html index 52a7bfaa54d..3fc8a876e0c 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index.html index cade90826ce..e80de98b103 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index.html index 72a1d3ba893..d3e00986907 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index.html index f97f4f24b4c..478cc941b7e 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index.html index 6f228c85fcf..347ab14f62f 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index.html index bb6117ae3ab..7a842cd889c 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index.html @@ -121,7 +121,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index.html index 6abc0c2c02b..ee01c6065c6 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index.html index 9ea37ca4b4f..116d2351594 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index.html @@ -139,7 +139,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index.html index 09e093f57fd..8cda62d38bf 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index.html index 624ba92d64d..8e99b60526d 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index.html index f2caf7f4a00..cf1086e5d61 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index.html index ed81563a547..8bd794c0e85 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index.html index c543f5b4202..35de1be6172 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index.html index 69310bbc318..4dd02b9b522 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index.html index 9c0d867ceb7..7be8cd5f999 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index.html index be9c7ba7742..f833bea0b02 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index.html @@ -158,7 +158,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index.html index 3a99764a4be..adc9a34fd92 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index.html @@ -156,7 +156,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index.html index 89356e99857..fdb8ff11c14 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index.html index 681478c3f2a..2a96a55e580 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index.html @@ -135,7 +135,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index.html index 60ec9699f22..9bc5cc3b08f 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index.html index fa4bf3b096b..37a5b637a66 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index.html @@ -117,7 +117,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index.html index 2bc0728c1af..7568add3826 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index.html index 5d9168830b4..282d7dc50c3 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/tf_utils/util/index.html b/latest/autoapi/neural_compressor/adaptor/tf_utils/util/index.html index 5253447e494..faaf35ea846 100644 --- a/latest/autoapi/neural_compressor/adaptor/tf_utils/util/index.html +++ b/latest/autoapi/neural_compressor/adaptor/tf_utils/util/index.html @@ -550,7 +550,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index.html index bc721b1d6fe..99bd2c18bf6 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index.html @@ -155,7 +155,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index.html index 25380ed4260..ad6d8f621f9 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index.html @@ -197,7 +197,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/index.html index 559606a21d2..8ded7906e52 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/index.html @@ -127,7 +127,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index.html index b30217b0ebb..32eee2107d6 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index.html @@ -117,7 +117,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index.html index 85f04400a73..c1894617eee 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index.html @@ -168,7 +168,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index.html index ffce8dce851..fbf54c52d71 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index.html @@ -126,7 +126,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index.html index 8899d775886..fb963e17f31 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index.html @@ -210,7 +210,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index.html index 5c03db7eb60..daed7d0c43d 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index.html @@ -214,7 +214,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index.html index c61b0a1c535..f6248e93972 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index.html @@ -143,7 +143,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index.html index 0a24514101f..e9ff8b48a9d 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index.html index c8e5d042229..dae8759869d 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index.html @@ -157,7 +157,7 @@


diff --git a/latest/autoapi/neural_compressor/adaptor/torch_utils/util/index.html b/latest/autoapi/neural_compressor/adaptor/torch_utils/util/index.html index 15ab7c7a062..495a34c3f62 100644 --- a/latest/autoapi/neural_compressor/adaptor/torch_utils/util/index.html +++ b/latest/autoapi/neural_compressor/adaptor/torch_utils/util/index.html @@ -770,7 +770,7 @@



diff --git a/latest/autoapi/neural_compressor/algorithm/algorithm/index.html b/latest/autoapi/neural_compressor/algorithm/algorithm/index.html index e83eb749100..d7668e9b6da 100644 --- a/latest/autoapi/neural_compressor/algorithm/algorithm/index.html +++ b/latest/autoapi/neural_compressor/algorithm/algorithm/index.html @@ -174,7 +174,7 @@


diff --git a/latest/autoapi/neural_compressor/algorithm/fast_bias_correction/index.html b/latest/autoapi/neural_compressor/algorithm/fast_bias_correction/index.html index 83fcf1f092a..b72ce1c970a 100644 --- a/latest/autoapi/neural_compressor/algorithm/fast_bias_correction/index.html +++ b/latest/autoapi/neural_compressor/algorithm/fast_bias_correction/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/algorithm/index.html b/latest/autoapi/neural_compressor/algorithm/index.html index 06df6626eec..360d43b54db 100644 --- a/latest/autoapi/neural_compressor/algorithm/index.html +++ b/latest/autoapi/neural_compressor/algorithm/index.html @@ -117,7 +117,7 @@


diff --git a/latest/autoapi/neural_compressor/algorithm/smooth_quant/index.html b/latest/autoapi/neural_compressor/algorithm/smooth_quant/index.html index c3dbe84dc10..d3cc23e5aa1 100644 --- a/latest/autoapi/neural_compressor/algorithm/smooth_quant/index.html +++ b/latest/autoapi/neural_compressor/algorithm/smooth_quant/index.html @@ -131,7 +131,7 @@


diff --git a/latest/autoapi/neural_compressor/algorithm/weight_correction/index.html b/latest/autoapi/neural_compressor/algorithm/weight_correction/index.html index 22fa5b0f6fd..1dca7fb794b 100644 --- a/latest/autoapi/neural_compressor/algorithm/weight_correction/index.html +++ b/latest/autoapi/neural_compressor/algorithm/weight_correction/index.html @@ -133,7 +133,7 @@


diff --git a/latest/autoapi/neural_compressor/benchmark/index.html b/latest/autoapi/neural_compressor/benchmark/index.html index 381e01c4cd2..58e2e26aa89 100644 --- a/latest/autoapi/neural_compressor/benchmark/index.html +++ b/latest/autoapi/neural_compressor/benchmark/index.html @@ -340,7 +340,7 @@


diff --git a/latest/autoapi/neural_compressor/common/base_config/index.html b/latest/autoapi/neural_compressor/common/base_config/index.html index b603271f166..4723ba65983 100644 --- a/latest/autoapi/neural_compressor/common/base_config/index.html +++ b/latest/autoapi/neural_compressor/common/base_config/index.html @@ -251,7 +251,7 @@


diff --git a/latest/autoapi/neural_compressor/common/base_tuning/index.html b/latest/autoapi/neural_compressor/common/base_tuning/index.html index a694aadf0ab..985b8400da4 100644 --- a/latest/autoapi/neural_compressor/common/base_tuning/index.html +++ b/latest/autoapi/neural_compressor/common/base_tuning/index.html @@ -274,7 +274,7 @@


diff --git a/latest/autoapi/neural_compressor/common/benchmark/index.html b/latest/autoapi/neural_compressor/common/benchmark/index.html index 705b31e445a..e8bdd613058 100644 --- a/latest/autoapi/neural_compressor/common/benchmark/index.html +++ b/latest/autoapi/neural_compressor/common/benchmark/index.html @@ -323,7 +323,7 @@


diff --git a/latest/autoapi/neural_compressor/common/index.html b/latest/autoapi/neural_compressor/common/index.html index 936a6668de9..81d761da74f 100644 --- a/latest/autoapi/neural_compressor/common/index.html +++ b/latest/autoapi/neural_compressor/common/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/common/tuning_param/index.html b/latest/autoapi/neural_compressor/common/tuning_param/index.html index 93bdba312fa..e7b23333538 100644 --- a/latest/autoapi/neural_compressor/common/tuning_param/index.html +++ b/latest/autoapi/neural_compressor/common/tuning_param/index.html @@ -174,7 +174,7 @@


diff --git a/latest/autoapi/neural_compressor/common/utils/constants/index.html b/latest/autoapi/neural_compressor/common/utils/constants/index.html index 0828cb05793..c84a8560ede 100644 --- a/latest/autoapi/neural_compressor/common/utils/constants/index.html +++ b/latest/autoapi/neural_compressor/common/utils/constants/index.html @@ -125,7 +125,7 @@


diff --git a/latest/autoapi/neural_compressor/common/utils/index.html b/latest/autoapi/neural_compressor/common/utils/index.html index 14fef62373c..31cf1ff8d47 100644 --- a/latest/autoapi/neural_compressor/common/utils/index.html +++ b/latest/autoapi/neural_compressor/common/utils/index.html @@ -116,7 +116,7 @@


diff --git a/latest/autoapi/neural_compressor/common/utils/logger/index.html b/latest/autoapi/neural_compressor/common/utils/logger/index.html index dfde70ea518..6e4ef21917f 100644 --- a/latest/autoapi/neural_compressor/common/utils/logger/index.html +++ b/latest/autoapi/neural_compressor/common/utils/logger/index.html @@ -135,7 +135,7 @@


diff --git a/latest/autoapi/neural_compressor/common/utils/save_load/index.html b/latest/autoapi/neural_compressor/common/utils/save_load/index.html index b5e2e75c736..e2af0bc21ca 100644 --- a/latest/autoapi/neural_compressor/common/utils/save_load/index.html +++ b/latest/autoapi/neural_compressor/common/utils/save_load/index.html @@ -157,7 +157,7 @@


diff --git a/latest/autoapi/neural_compressor/common/utils/utility/index.html b/latest/autoapi/neural_compressor/common/utils/utility/index.html index 5f9960eb8ce..9b86772c455 100644 --- a/latest/autoapi/neural_compressor/common/utils/utility/index.html +++ b/latest/autoapi/neural_compressor/common/utils/utility/index.html @@ -278,7 +278,7 @@


diff --git a/latest/autoapi/neural_compressor/compression/callbacks/index.html b/latest/autoapi/neural_compressor/compression/callbacks/index.html index 16c3319704e..c51c0c045ea 100644 --- a/latest/autoapi/neural_compressor/compression/callbacks/index.html +++ b/latest/autoapi/neural_compressor/compression/callbacks/index.html @@ -196,7 +196,7 @@


diff --git a/latest/autoapi/neural_compressor/compression/distillation/criterions/index.html b/latest/autoapi/neural_compressor/compression/distillation/criterions/index.html index b1f21bac5f5..7958b290a9f 100644 --- a/latest/autoapi/neural_compressor/compression/distillation/criterions/index.html +++ b/latest/autoapi/neural_compressor/compression/distillation/criterions/index.html @@ -324,7 +324,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/distillation/index.html b/latest/autoapi/neural_compressor/compression/distillation/index.html index 185ee0ebbbe..5032b26d67a 100644 --- a/latest/autoapi/neural_compressor/compression/distillation/index.html +++ b/latest/autoapi/neural_compressor/compression/distillation/index.html @@ -116,7 +116,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/distillation/optimizers/index.html b/latest/autoapi/neural_compressor/compression/distillation/optimizers/index.html index fd23491970c..d4f1ec8e349 100644 --- a/latest/autoapi/neural_compressor/compression/distillation/optimizers/index.html +++ b/latest/autoapi/neural_compressor/compression/distillation/optimizers/index.html @@ -232,7 +232,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/distillation/utility/index.html b/latest/autoapi/neural_compressor/compression/distillation/utility/index.html index af9e57aa71a..ee74ed43f34 100644 --- a/latest/autoapi/neural_compressor/compression/distillation/utility/index.html +++ b/latest/autoapi/neural_compressor/compression/distillation/utility/index.html @@ -135,7 +135,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/hpo/index.html b/latest/autoapi/neural_compressor/compression/hpo/index.html index 593908d5e91..5b4380950ef 100644 --- a/latest/autoapi/neural_compressor/compression/hpo/index.html +++ b/latest/autoapi/neural_compressor/compression/hpo/index.html @@ -114,7 +114,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/hpo/sa_optimizer/index.html b/latest/autoapi/neural_compressor/compression/hpo/sa_optimizer/index.html index 26cdfe58e07..e2650e59b79 100644 --- a/latest/autoapi/neural_compressor/compression/hpo/sa_optimizer/index.html +++ b/latest/autoapi/neural_compressor/compression/hpo/sa_optimizer/index.html @@ -106,7 +106,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/criteria/index.html b/latest/autoapi/neural_compressor/compression/pruner/criteria/index.html index 894665defd6..2ccf50180a4 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/criteria/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/criteria/index.html @@ -321,7 +321,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/index.html b/latest/autoapi/neural_compressor/compression/pruner/index.html index c6452255d70..695c2adea9d 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/index.html @@ -185,7 +185,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index.html b/latest/autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index.html index 11a803e18da..bddef981560 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index.html @@ -183,7 +183,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/model_slim/index.html b/latest/autoapi/neural_compressor/compression/pruner/model_slim/index.html index 71e51e7bb45..153096f5f51 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/model_slim/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/model_slim/index.html @@ -116,7 +116,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index.html b/latest/autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index.html index b3ec4c7140e..3f94c4cf9f1 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index.html @@ -453,7 +453,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index.html b/latest/autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index.html index c5a79e6aec2..0ad80c8fb8d 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index.html @@ -197,7 +197,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/patterns/base/index.html b/latest/autoapi/neural_compressor/compression/pruner/patterns/base/index.html index 56bc22ecc9d..3594ff0997a 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/patterns/base/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/patterns/base/index.html @@ -376,7 +376,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/patterns/index.html b/latest/autoapi/neural_compressor/compression/pruner/patterns/index.html index 250c1cb4481..943bce48a58 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/patterns/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/patterns/index.html @@ -151,7 +151,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/patterns/mha/index.html b/latest/autoapi/neural_compressor/compression/pruner/patterns/mha/index.html index 0b91e19ffa4..30776287de4 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/patterns/mha/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/patterns/mha/index.html @@ -145,7 +145,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/patterns/ninm/index.html b/latest/autoapi/neural_compressor/compression/pruner/patterns/ninm/index.html index af953483dc1..5456a426d95 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/patterns/ninm/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/patterns/ninm/index.html @@ -145,7 +145,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/patterns/nxm/index.html b/latest/autoapi/neural_compressor/compression/pruner/patterns/nxm/index.html index b2c0cf1a99f..944b2272e0b 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/patterns/nxm/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/patterns/nxm/index.html @@ -172,7 +172,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/base/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/base/index.html index 486bce9aa25..3a14682a166 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/base/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/base/index.html @@ -442,7 +442,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/basic/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/basic/index.html index a1f09c21dc8..78e464b4505 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/basic/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/basic/index.html @@ -204,7 +204,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/block_mask/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/block_mask/index.html index f861cde7a3e..d5a5cd75447 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/block_mask/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/block_mask/index.html @@ -161,7 +161,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/index.html index 079cdfaa5b4..38de5fe5649 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/index.html @@ -161,7 +161,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/mha/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/mha/index.html index 193faaf35b1..94ab2e571af 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/mha/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/mha/index.html @@ -177,7 +177,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index.html index c57843491fc..31033164377 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/progressive/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/progressive/index.html index 433bde53d44..401f6a43e0e 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/progressive/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/progressive/index.html @@ -143,7 +143,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index.html index a8912e4e295..e03c1928001 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index.html @@ -170,7 +170,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/pruning/index.html b/latest/autoapi/neural_compressor/compression/pruner/pruning/index.html index d51a3f0cca5..138240bf7a8 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/pruning/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/pruning/index.html @@ -288,7 +288,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/regs/index.html b/latest/autoapi/neural_compressor/compression/pruner/regs/index.html index 03f722cc369..d613228b68d 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/regs/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/regs/index.html @@ -226,7 +226,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/schedulers/index.html b/latest/autoapi/neural_compressor/compression/pruner/schedulers/index.html index ff41e3b27ce..c62408e060f 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/schedulers/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/schedulers/index.html @@ -236,7 +236,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/tf_criteria/index.html b/latest/autoapi/neural_compressor/compression/pruner/tf_criteria/index.html index f4dbfe09ede..6de21e9b795 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/tf_criteria/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/tf_criteria/index.html @@ -189,7 +189,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/utils/index.html b/latest/autoapi/neural_compressor/compression/pruner/utils/index.html index 88dafefd35d..878578af205 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/utils/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/utils/index.html @@ -395,7 +395,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/wanda/index.html b/latest/autoapi/neural_compressor/compression/pruner/wanda/index.html index e4f7107b0d9..da3fff867ca 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/wanda/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/wanda/index.html @@ -114,7 +114,7 @@

diff --git a/latest/autoapi/neural_compressor/compression/pruner/wanda/utils/index.html b/latest/autoapi/neural_compressor/compression/pruner/wanda/utils/index.html index 080a633bc3c..c573568b863 100644 --- a/latest/autoapi/neural_compressor/compression/pruner/wanda/utils/index.html +++ b/latest/autoapi/neural_compressor/compression/pruner/wanda/utils/index.html @@ -140,7 +140,7 @@

diff --git a/latest/autoapi/neural_compressor/config/index.html b/latest/autoapi/neural_compressor/config/index.html index da817412241..f6fd90ad5ce 100644 --- a/latest/autoapi/neural_compressor/config/index.html +++ b/latest/autoapi/neural_compressor/config/index.html @@ -1061,7 +1061,7 @@

diff --git a/latest/autoapi/neural_compressor/contrib/index.html b/latest/autoapi/neural_compressor/contrib/index.html index e5de95a881a..7bfbf6c6340 100644 --- a/latest/autoapi/neural_compressor/contrib/index.html +++ b/latest/autoapi/neural_compressor/contrib/index.html @@ -114,7 +114,7 @@

diff --git a/latest/autoapi/neural_compressor/contrib/strategy/index.html b/latest/autoapi/neural_compressor/contrib/strategy/index.html index 7168f822d58..39e3d9d5e7c 100644 --- a/latest/autoapi/neural_compressor/contrib/strategy/index.html +++ b/latest/autoapi/neural_compressor/contrib/strategy/index.html @@ -115,7 +115,7 @@

diff --git a/latest/autoapi/neural_compressor/contrib/strategy/sigopt/index.html b/latest/autoapi/neural_compressor/contrib/strategy/sigopt/index.html index 4c0f712a832..b9ab36b5bce 100644 --- a/latest/autoapi/neural_compressor/contrib/strategy/sigopt/index.html +++ b/latest/autoapi/neural_compressor/contrib/strategy/sigopt/index.html @@ -170,7 +170,7 @@

diff --git a/latest/autoapi/neural_compressor/contrib/strategy/tpe/index.html b/latest/autoapi/neural_compressor/contrib/strategy/tpe/index.html index 617e4edd688..40b716bae2f 100644 --- a/latest/autoapi/neural_compressor/contrib/strategy/tpe/index.html +++ b/latest/autoapi/neural_compressor/contrib/strategy/tpe/index.html @@ -170,7 +170,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/base_dataloader/index.html b/latest/autoapi/neural_compressor/data/dataloaders/base_dataloader/index.html index bd2972f26fc..77fc8191795 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/base_dataloader/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/base_dataloader/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/dataloader/index.html b/latest/autoapi/neural_compressor/data/dataloaders/dataloader/index.html index ec2890debb4..5a7ac0836d5 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/dataloader/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/dataloader/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/default_dataloader/index.html b/latest/autoapi/neural_compressor/data/dataloaders/default_dataloader/index.html index 4cf6b06608f..81fa8375fb6 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/default_dataloader/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/default_dataloader/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/fetcher/index.html b/latest/autoapi/neural_compressor/data/dataloaders/fetcher/index.html index 1072fd5a23e..f1149812dd0 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/fetcher/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/fetcher/index.html @@ -143,7 +143,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index.html b/latest/autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index.html index 4ce34bb6eb9..5938abc878f 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index.html b/latest/autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index.html index e12e38c68e3..7dfed84377e 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index.html b/latest/autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index.html index f893f50841e..9db17e91fdb 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/sampler/index.html b/latest/autoapi/neural_compressor/data/dataloaders/sampler/index.html index 9e6a718b818..5ee265f18e8 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/sampler/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/sampler/index.html @@ -158,7 +158,7 @@

diff --git a/latest/autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index.html b/latest/autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index.html index bf652720700..9b3d9bc4032 100644 --- a/latest/autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index.html +++ b/latest/autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index.html @@ -167,7 +167,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/bert_dataset/index.html b/latest/autoapi/neural_compressor/data/datasets/bert_dataset/index.html index c3d67a72d98..015954a1881 100644 --- a/latest/autoapi/neural_compressor/data/datasets/bert_dataset/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/bert_dataset/index.html @@ -305,7 +305,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/coco_dataset/index.html b/latest/autoapi/neural_compressor/data/datasets/coco_dataset/index.html index d526948afa9..18dc779cfbe 100644 --- a/latest/autoapi/neural_compressor/data/datasets/coco_dataset/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/coco_dataset/index.html @@ -199,7 +199,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/dataset/index.html b/latest/autoapi/neural_compressor/data/datasets/dataset/index.html index 4a427a358b9..3ba3ebd5170 100644 --- a/latest/autoapi/neural_compressor/data/datasets/dataset/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/dataset/index.html @@ -723,7 +723,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/dummy_dataset/index.html b/latest/autoapi/neural_compressor/data/datasets/dummy_dataset/index.html index ca55e85c25b..515c72949f3 100644 --- a/latest/autoapi/neural_compressor/data/datasets/dummy_dataset/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/dummy_dataset/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index.html b/latest/autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index.html index 76a59784ad0..20ab2a5b0d7 100644 --- a/latest/autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index.html @@ -138,7 +138,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/imagenet_dataset/index.html b/latest/autoapi/neural_compressor/data/datasets/imagenet_dataset/index.html index 4673c72f2b4..01de35d6b9d 100644 --- a/latest/autoapi/neural_compressor/data/datasets/imagenet_dataset/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/imagenet_dataset/index.html @@ -179,7 +179,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/index.html b/latest/autoapi/neural_compressor/data/datasets/index.html index 9ebd1589205..39ebe326511 100644 --- a/latest/autoapi/neural_compressor/data/datasets/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/index.html @@ -120,7 +120,7 @@

diff --git a/latest/autoapi/neural_compressor/data/datasets/style_transfer_dataset/index.html b/latest/autoapi/neural_compressor/data/datasets/style_transfer_dataset/index.html index f64c0ebd473..c2051150c0c 100644 --- a/latest/autoapi/neural_compressor/data/datasets/style_transfer_dataset/index.html +++ b/latest/autoapi/neural_compressor/data/datasets/style_transfer_dataset/index.html @@ -129,7 +129,7 @@

diff --git a/latest/autoapi/neural_compressor/data/filters/coco_filter/index.html b/latest/autoapi/neural_compressor/data/filters/coco_filter/index.html index f8a7931771a..0c217c01b36 100644 --- a/latest/autoapi/neural_compressor/data/filters/coco_filter/index.html +++ b/latest/autoapi/neural_compressor/data/filters/coco_filter/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/data/filters/filter/index.html b/latest/autoapi/neural_compressor/data/filters/filter/index.html index b868ff714a8..844a2d3cdc9 100644 --- a/latest/autoapi/neural_compressor/data/filters/filter/index.html +++ b/latest/autoapi/neural_compressor/data/filters/filter/index.html @@ -219,7 +219,7 @@

diff --git a/latest/autoapi/neural_compressor/data/filters/index.html b/latest/autoapi/neural_compressor/data/filters/index.html index 4c7af72873f..46b991b9f18 100644 --- a/latest/autoapi/neural_compressor/data/filters/index.html +++ b/latest/autoapi/neural_compressor/data/filters/index.html @@ -115,7 +115,7 @@

diff --git a/latest/autoapi/neural_compressor/data/index.html b/latest/autoapi/neural_compressor/data/index.html index 62ff8a83c89..1fbee716d97 100644 --- a/latest/autoapi/neural_compressor/data/index.html +++ b/latest/autoapi/neural_compressor/data/index.html @@ -116,7 +116,7 @@

diff --git a/latest/autoapi/neural_compressor/data/transforms/imagenet_transform/index.html b/latest/autoapi/neural_compressor/data/transforms/imagenet_transform/index.html index 43a28876b8e..e55345fe95c 100644 --- a/latest/autoapi/neural_compressor/data/transforms/imagenet_transform/index.html +++ b/latest/autoapi/neural_compressor/data/transforms/imagenet_transform/index.html @@ -317,7 +317,7 @@

diff --git a/latest/autoapi/neural_compressor/data/transforms/index.html b/latest/autoapi/neural_compressor/data/transforms/index.html index bace01e070e..83edc2c698c 100644 --- a/latest/autoapi/neural_compressor/data/transforms/index.html +++ b/latest/autoapi/neural_compressor/data/transforms/index.html @@ -117,7 +117,7 @@

diff --git a/latest/autoapi/neural_compressor/data/transforms/postprocess/index.html b/latest/autoapi/neural_compressor/data/transforms/postprocess/index.html index 9c74c11c9cb..4ee183244ae 100644 --- a/latest/autoapi/neural_compressor/data/transforms/postprocess/index.html +++ b/latest/autoapi/neural_compressor/data/transforms/postprocess/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/data/transforms/tokenization/index.html b/latest/autoapi/neural_compressor/data/transforms/tokenization/index.html index 6d295fa657c..e21c8a4dc61 100644 --- a/latest/autoapi/neural_compressor/data/transforms/tokenization/index.html +++ b/latest/autoapi/neural_compressor/data/transforms/tokenization/index.html @@ -186,7 +186,7 @@

diff --git a/latest/autoapi/neural_compressor/data/transforms/transform/index.html b/latest/autoapi/neural_compressor/data/transforms/transform/index.html index cb490991bb8..087b8a3ad6e 100644 --- a/latest/autoapi/neural_compressor/data/transforms/transform/index.html +++ b/latest/autoapi/neural_compressor/data/transforms/transform/index.html @@ -1267,7 +1267,7 @@

diff --git a/latest/autoapi/neural_compressor/index.html b/latest/autoapi/neural_compressor/index.html index 4532fc8a304..82a37420e7d 100644 --- a/latest/autoapi/neural_compressor/index.html +++ b/latest/autoapi/neural_compressor/index.html @@ -138,7 +138,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/bleu/index.html b/latest/autoapi/neural_compressor/metric/bleu/index.html index 0dd1670146f..ed7c98ecb61 100644 --- a/latest/autoapi/neural_compressor/metric/bleu/index.html +++ b/latest/autoapi/neural_compressor/metric/bleu/index.html @@ -203,7 +203,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/bleu_util/index.html b/latest/autoapi/neural_compressor/metric/bleu_util/index.html index d9a1c2acc19..e435e13da1d 100644 --- a/latest/autoapi/neural_compressor/metric/bleu_util/index.html +++ b/latest/autoapi/neural_compressor/metric/bleu_util/index.html @@ -145,7 +145,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/coco_label_map/index.html b/latest/autoapi/neural_compressor/metric/coco_label_map/index.html index cf36c2fcab9..fc32cc38c75 100644 --- a/latest/autoapi/neural_compressor/metric/coco_label_map/index.html +++ b/latest/autoapi/neural_compressor/metric/coco_label_map/index.html @@ -106,7 +106,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/coco_tools/index.html b/latest/autoapi/neural_compressor/metric/coco_tools/index.html index 5f65b5dfd6d..c3356e3885c 100644 --- a/latest/autoapi/neural_compressor/metric/coco_tools/index.html +++ b/latest/autoapi/neural_compressor/metric/coco_tools/index.html @@ -315,7 +315,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/evaluate_squad/index.html b/latest/autoapi/neural_compressor/metric/evaluate_squad/index.html index 6be52d207f9..e0a778612b1 100644 --- a/latest/autoapi/neural_compressor/metric/evaluate_squad/index.html +++ b/latest/autoapi/neural_compressor/metric/evaluate_squad/index.html @@ -204,7 +204,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/f1/index.html b/latest/autoapi/neural_compressor/metric/f1/index.html index a54c4361642..797eca13267 100644 --- a/latest/autoapi/neural_compressor/metric/f1/index.html +++ b/latest/autoapi/neural_compressor/metric/f1/index.html @@ -215,7 +215,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/index.html b/latest/autoapi/neural_compressor/metric/index.html index 742c51b259f..5719b591df1 100644 --- a/latest/autoapi/neural_compressor/metric/index.html +++ b/latest/autoapi/neural_compressor/metric/index.html @@ -120,7 +120,7 @@

diff --git a/latest/autoapi/neural_compressor/metric/metric/index.html b/latest/autoapi/neural_compressor/metric/metric/index.html index b864514467a..7f2786c3aff 100644 --- a/latest/autoapi/neural_compressor/metric/metric/index.html +++ b/latest/autoapi/neural_compressor/metric/metric/index.html @@ -643,7 +643,7 @@

diff --git a/latest/autoapi/neural_compressor/mix_precision/index.html b/latest/autoapi/neural_compressor/mix_precision/index.html index 08409ab039d..0b578d212ea 100644 --- a/latest/autoapi/neural_compressor/mix_precision/index.html +++ b/latest/autoapi/neural_compressor/mix_precision/index.html @@ -176,7 +176,7 @@

diff --git a/latest/autoapi/neural_compressor/model/base_model/index.html b/latest/autoapi/neural_compressor/model/base_model/index.html index f5ff65f1d74..20fe078caa7 100644 --- a/latest/autoapi/neural_compressor/model/base_model/index.html +++ b/latest/autoapi/neural_compressor/model/base_model/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/model/index.html b/latest/autoapi/neural_compressor/model/index.html index 83cb1725175..4b74b2c896e 100644 --- a/latest/autoapi/neural_compressor/model/index.html +++ b/latest/autoapi/neural_compressor/model/index.html @@ -121,7 +121,7 @@

diff --git a/latest/autoapi/neural_compressor/model/keras_model/index.html b/latest/autoapi/neural_compressor/model/keras_model/index.html index 71d5150c2e8..273bf53c4bb 100644 --- a/latest/autoapi/neural_compressor/model/keras_model/index.html +++ b/latest/autoapi/neural_compressor/model/keras_model/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/model/model/index.html b/latest/autoapi/neural_compressor/model/model/index.html index d6d8423d23c..120c01000a2 100644 --- a/latest/autoapi/neural_compressor/model/model/index.html +++ b/latest/autoapi/neural_compressor/model/model/index.html @@ -147,7 +147,7 @@

diff --git a/latest/autoapi/neural_compressor/model/mxnet_model/index.html b/latest/autoapi/neural_compressor/model/mxnet_model/index.html index 1a4cc1c95ee..41d7d66ecf6 100644 --- a/latest/autoapi/neural_compressor/model/mxnet_model/index.html +++ b/latest/autoapi/neural_compressor/model/mxnet_model/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/model/nets_factory/index.html b/latest/autoapi/neural_compressor/model/nets_factory/index.html index 16c35a1e4ff..f7f1fd505ea 100644 --- a/latest/autoapi/neural_compressor/model/nets_factory/index.html +++ b/latest/autoapi/neural_compressor/model/nets_factory/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/model/onnx_model/index.html b/latest/autoapi/neural_compressor/model/onnx_model/index.html index 3c480f1d8d9..784907ca087 100644 --- a/latest/autoapi/neural_compressor/model/onnx_model/index.html +++ b/latest/autoapi/neural_compressor/model/onnx_model/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/model/tensorflow_model/index.html b/latest/autoapi/neural_compressor/model/tensorflow_model/index.html index 24cb58deafa..ebd8c1d72db 100644 --- a/latest/autoapi/neural_compressor/model/tensorflow_model/index.html +++ b/latest/autoapi/neural_compressor/model/tensorflow_model/index.html @@ -500,7 +500,7 @@

diff --git a/latest/autoapi/neural_compressor/model/torch_model/index.html b/latest/autoapi/neural_compressor/model/torch_model/index.html index 0c9092b194f..c284097669a 100644 --- a/latest/autoapi/neural_compressor/model/torch_model/index.html +++ b/latest/autoapi/neural_compressor/model/torch_model/index.html @@ -152,7 +152,7 @@

diff --git a/latest/autoapi/neural_compressor/objective/index.html b/latest/autoapi/neural_compressor/objective/index.html index 67b25ba0939..ae53fab65ef 100644 --- a/latest/autoapi/neural_compressor/objective/index.html +++ b/latest/autoapi/neural_compressor/objective/index.html @@ -233,7 +233,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/index.html b/latest/autoapi/neural_compressor/profiling/index.html index 13999dc32fd..919f44fcfc8 100644 --- a/latest/autoapi/neural_compressor/profiling/index.html +++ b/latest/autoapi/neural_compressor/profiling/index.html @@ -106,7 +106,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/parser/factory/index.html b/latest/autoapi/neural_compressor/profiling/parser/factory/index.html index e5d8445cccf..62fee1dfc14 100644 --- a/latest/autoapi/neural_compressor/profiling/parser/factory/index.html +++ b/latest/autoapi/neural_compressor/profiling/parser/factory/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index.html b/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index.html index 75fec7318ac..b19e6a15540 100644 --- a/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index.html +++ b/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index.html b/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index.html index 421755fb632..e200c42038b 100644 --- a/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index.html +++ b/latest/autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/parser/parser/index.html b/latest/autoapi/neural_compressor/profiling/parser/parser/index.html index 2251adcedb8..e2838d9c5a2 100644 --- a/latest/autoapi/neural_compressor/profiling/parser/parser/index.html +++ b/latest/autoapi/neural_compressor/profiling/parser/parser/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/parser/result/index.html b/latest/autoapi/neural_compressor/profiling/parser/result/index.html index 54d3ef98b49..2aaaf35585b 100644 --- a/latest/autoapi/neural_compressor/profiling/parser/result/index.html +++ b/latest/autoapi/neural_compressor/profiling/parser/result/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index.html b/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index.html index 84f1579932c..9d72d0c20b2 100644 --- a/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index.html +++ b/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index.html b/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index.html index 5910d2bd125..cb1ea1140c0 100644 --- a/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index.html +++ b/latest/autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/factory/index.html b/latest/autoapi/neural_compressor/profiling/profiler/factory/index.html index 06711dfc882..593882374c7 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/factory/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/factory/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index.html b/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index.html index d6b222bf04e..db161d57033 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index.html b/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index.html index 2e0dba6ebe5..0a323b99b54 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index.html b/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index.html index f608a5fb6cd..0f89b8f2ab5 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index.html @@ -137,7 +137,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/profiler/index.html b/latest/autoapi/neural_compressor/profiling/profiler/profiler/index.html index 94146d42b0c..7876e7e94a1 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/profiler/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/profiler/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index.html b/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index.html index 5fe79984bee..d4afffa5afd 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index.html b/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index.html index 9e1ca3b1408..201c3abe9c4 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index.html b/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index.html index 1f7b4e7a2bf..775a6944f81 100644 --- a/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index.html +++ b/latest/autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index.html @@ -169,7 +169,7 @@

diff --git a/latest/autoapi/neural_compressor/quantization/index.html b/latest/autoapi/neural_compressor/quantization/index.html index 522fb5f2ad6..1aa6610cf4e 100644 --- a/latest/autoapi/neural_compressor/quantization/index.html +++ b/latest/autoapi/neural_compressor/quantization/index.html @@ -245,7 +245,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/auto/index.html b/latest/autoapi/neural_compressor/strategy/auto/index.html index d5f13db6941..d2a0567d7cd 100644 --- a/latest/autoapi/neural_compressor/strategy/auto/index.html +++ b/latest/autoapi/neural_compressor/strategy/auto/index.html @@ -127,7 +127,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/auto_mixed_precision/index.html b/latest/autoapi/neural_compressor/strategy/auto_mixed_precision/index.html index 2d5326e0360..2631d338ad5 100644 --- a/latest/autoapi/neural_compressor/strategy/auto_mixed_precision/index.html +++ b/latest/autoapi/neural_compressor/strategy/auto_mixed_precision/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/basic/index.html b/latest/autoapi/neural_compressor/strategy/basic/index.html index 8363bc35f92..9b1eced5ce2 100644 --- a/latest/autoapi/neural_compressor/strategy/basic/index.html +++ b/latest/autoapi/neural_compressor/strategy/basic/index.html @@ -127,7 +127,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/bayesian/index.html b/latest/autoapi/neural_compressor/strategy/bayesian/index.html index 8946738ebb2..7058b2cf9a1 100644 --- a/latest/autoapi/neural_compressor/strategy/bayesian/index.html +++ b/latest/autoapi/neural_compressor/strategy/bayesian/index.html @@ -181,7 +181,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/conservative/index.html b/latest/autoapi/neural_compressor/strategy/conservative/index.html index 2dcd548d707..cdbb8f7d80f 100644 --- a/latest/autoapi/neural_compressor/strategy/conservative/index.html +++ b/latest/autoapi/neural_compressor/strategy/conservative/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/exhaustive/index.html b/latest/autoapi/neural_compressor/strategy/exhaustive/index.html index e300d04d81d..219e9c814e6 100644 --- a/latest/autoapi/neural_compressor/strategy/exhaustive/index.html +++ b/latest/autoapi/neural_compressor/strategy/exhaustive/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/hawq_v2/index.html b/latest/autoapi/neural_compressor/strategy/hawq_v2/index.html index 8f652d54c8f..9e7a96f5d20 100644 --- a/latest/autoapi/neural_compressor/strategy/hawq_v2/index.html +++ b/latest/autoapi/neural_compressor/strategy/hawq_v2/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/index.html b/latest/autoapi/neural_compressor/strategy/index.html index 8a274c1b196..763df818528 100644 --- a/latest/autoapi/neural_compressor/strategy/index.html +++ b/latest/autoapi/neural_compressor/strategy/index.html @@ -132,7 +132,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/mse/index.html b/latest/autoapi/neural_compressor/strategy/mse/index.html index 8628996a42b..80f6dd12191 100644 --- a/latest/autoapi/neural_compressor/strategy/mse/index.html +++ b/latest/autoapi/neural_compressor/strategy/mse/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/mse_v2/index.html b/latest/autoapi/neural_compressor/strategy/mse_v2/index.html index 55ff79f7c38..f5a8d4ad3c9 100644 --- a/latest/autoapi/neural_compressor/strategy/mse_v2/index.html +++ b/latest/autoapi/neural_compressor/strategy/mse_v2/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/random/index.html b/latest/autoapi/neural_compressor/strategy/random/index.html index 5b0b6eff1c3..1952c2f5862 100644 --- a/latest/autoapi/neural_compressor/strategy/random/index.html +++ b/latest/autoapi/neural_compressor/strategy/random/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/strategy/index.html b/latest/autoapi/neural_compressor/strategy/strategy/index.html index a661d52b49b..ed570683cc3 100644 --- a/latest/autoapi/neural_compressor/strategy/strategy/index.html +++ b/latest/autoapi/neural_compressor/strategy/strategy/index.html @@ -161,7 +161,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/utils/constant/index.html b/latest/autoapi/neural_compressor/strategy/utils/constant/index.html index b01e43f3ea6..9149b511685 100644 --- a/latest/autoapi/neural_compressor/strategy/utils/constant/index.html +++ b/latest/autoapi/neural_compressor/strategy/utils/constant/index.html @@ -106,7 +106,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/utils/index.html b/latest/autoapi/neural_compressor/strategy/utils/index.html index 5c0d755bf34..42e0be42fa2 100644 --- a/latest/autoapi/neural_compressor/strategy/utils/index.html +++ b/latest/autoapi/neural_compressor/strategy/utils/index.html @@ -118,7 +118,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/utils/tuning_sampler/index.html b/latest/autoapi/neural_compressor/strategy/utils/tuning_sampler/index.html index 6a82953ef40..21c36a80b35 100644 --- a/latest/autoapi/neural_compressor/strategy/utils/tuning_sampler/index.html +++ b/latest/autoapi/neural_compressor/strategy/utils/tuning_sampler/index.html @@ -207,7 +207,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/utils/tuning_space/index.html b/latest/autoapi/neural_compressor/strategy/utils/tuning_space/index.html index f954d922004..3efac070afd 100644 --- a/latest/autoapi/neural_compressor/strategy/utils/tuning_space/index.html +++ b/latest/autoapi/neural_compressor/strategy/utils/tuning_space/index.html @@ -207,7 +207,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/utils/tuning_structs/index.html b/latest/autoapi/neural_compressor/strategy/utils/tuning_structs/index.html index 290d3b6b215..f3a78d11f0c 100644 --- a/latest/autoapi/neural_compressor/strategy/utils/tuning_structs/index.html +++ b/latest/autoapi/neural_compressor/strategy/utils/tuning_structs/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/strategy/utils/utility/index.html b/latest/autoapi/neural_compressor/strategy/utils/utility/index.html index bfb633a7ca2..56aa3b98981 100644 --- a/latest/autoapi/neural_compressor/strategy/utils/utility/index.html +++ b/latest/autoapi/neural_compressor/strategy/utils/utility/index.html @@ -245,7 +245,7 @@

diff --git a/latest/autoapi/neural_compressor/template/api_doc_example/index.html b/latest/autoapi/neural_compressor/template/api_doc_example/index.html index f12f35a90bb..6230214ef31 100644 --- a/latest/autoapi/neural_compressor/template/api_doc_example/index.html +++ b/latest/autoapi/neural_compressor/template/api_doc_example/index.html @@ -349,7 +349,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/index.html index 374f707be34..3f338e66468 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/index.html @@ -115,7 +115,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index.html index b614356beee..7344ad2bcc1 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index.html @@ -158,7 +158,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index.html index 86b3c05f455..033db77bef9 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/index.html index 3df9b75a8f5..642d4e6d77e 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/index.html @@ -116,7 +116,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index.html index ed92e7c8008..f2cf1f41e7a 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index.html @@ -156,7 +156,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/index.html index 6678f2d1f58..71f83101536 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/index.html @@ -115,7 +115,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index.html index abb7eb5c5f9..6d3101c2f97 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index.html @@ -152,7 +152,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index.html b/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index.html index e2ad6ba6bae..0669ebc5728 100644 --- a/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index.html @@ -161,7 +161,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/index.html b/latest/autoapi/neural_compressor/tensorflow/index.html index 94f3a0dc2c2..1121962f390 100644 --- a/latest/autoapi/neural_compressor/tensorflow/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/index.html @@ -117,7 +117,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/index.html index c89d059695a..1d21adb3ee8 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/index.html @@ -115,7 +115,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index.html index 7f2d9015b93..ae667916aed 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/layers/dense/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/layers/dense/index.html index c7f707a2d94..5ff4a23532f 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/layers/dense/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/layers/dense/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index.html index 01f5cded242..a2e06971d94 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/layers/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/layers/index.html index 8a762fa9769..d30a1f63e4d 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/layers/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/layers/index.html @@ -119,7 +119,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index.html index b4a3025e95b..c314402974e 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index.html @@ -106,7 +106,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index.html index 844532a43a8..848f97963e5 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index.html @@ -159,7 +159,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index.html index ddbdd9924aa..19cc2269bbc 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/quantization/config/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/quantization/config/index.html index 65de5ef4728..6f08a7dff05 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/quantization/config/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/quantization/config/index.html @@ -164,7 +164,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/keras/quantization/index.html b/latest/autoapi/neural_compressor/tensorflow/keras/quantization/index.html index 9a9673d9600..fcb5dac2935 100644 --- a/latest/autoapi/neural_compressor/tensorflow/keras/quantization/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/keras/quantization/index.html @@ -114,7 +114,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index.html index 477abc2bacf..f9d26291a98 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index.html @@ -170,7 +170,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/autotune/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/autotune/index.html index 4bd11f0108e..ac32dd56886 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/autotune/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/autotune/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/config/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/config/index.html index a902f4a4924..fdf2683b468 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/config/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/config/index.html @@ -169,7 +169,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/index.html index 9a95a347727..0eea26fe158 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/index.html @@ -124,7 +124,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/quantize/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/quantize/index.html index c4c9bd85d55..3bcdb15a1f9 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/quantize/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/quantize/index.html @@ -179,7 +179,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index.html index 49062cffb99..aece8871d19 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index.html index 0ca598f60be..6bbde06a7e8 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html index 755ef653acb..733e6c07fcb 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index.html index 63d0b0569ab..5896b85de4d 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index.html @@ -115,7 +115,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index.html index 4f4729b1307..82519e0e95c 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index.html index ab19a666655..6695a09bcb8 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index.html @@ -131,7 +131,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index.html index 6dbc145aa71..f10b9d14711 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index.html @@ -141,7 +141,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index.html index c5265604276..68ebd6811be 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index.html index 0f0fc06a104..32e69fc0a35 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index.html index f516bf3b54a..109db2af89f 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index.html index 0342b3ecf7d..ce9e1d53b1b 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index.html index ac55404b602..830d3839cc9 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html index 9dd3add9414..f18e2759fd6 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index.html index 2d0d172da60..1fcc5fa5406 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index.html index 0997d340193..08d37e3da3e 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index.html index fbd1bc5ea06..12c4b5b450d 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index.html index c447e0ff4b9..a042311be53 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index.html index 38084c627dc..5fa29a7d8a7 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index.html @@ -140,7 +140,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index.html index 3bc85501ba7..54c12da2b93 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index.html @@ -219,7 +219,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index.html index 05f766f67e1..40a023bef15 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index.html @@ -219,7 +219,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index.html index 710baf96c68..b3ce87eb7bc 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index.html index 0a1c5ded7d7..3c20e2084ec 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index.html @@ -186,7 +186,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index.html index ed4f2706fe8..db0f160f0ab 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html index 3e81f60ceca..c0660584abe 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index.html index 28f1615c344..aff0fea6619 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index.html index 45a3891d8f0..bad7ae0dc1e 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index.html @@ -181,7 +181,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index.html index e8c535a6c48..158d6b70c32 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index.html index 2a70eb47c15..aac932605ce 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index.html @@ -145,7 +145,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index.html index 6ad8c14bb7b..98e07c21d8f 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index.html index 9e033f573da..a53d2707481 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index.html index 0e156a2b0fe..6213ada752f 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index.html index 8659f4cebf5..2033d00fae5 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index.html index d1aaf1bb6dd..dce9a5d69f0 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index.html index 3775b064568..4229fc0f699 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index.html index 247c8353c4b..61b90cf3aae 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index.html index 8b49fdc792b..a17b6751647 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index.html index c87caa25b98..7a714dbf95f 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index.html index 73ef9a6b8c7..7ce535593ad 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index.html @@ -131,7 +131,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index.html index 7599cf262d3..407eb6fcd90 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index.html index a851b03af2a..d9df1e2897d 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index.html index 213fb62f79c..c4a3f6734f1 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html index 57fd7b69266..3cc82545758 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index.html index 266c72df50d..5d7157ebbd0 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html index 6d66a6c19f0..75bec2b49dc 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index.html index 2290a497877..2cdce6afe6f 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index.html @@ -152,7 +152,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index.html index a093d97ed39..0d833ee7120 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index.html @@ -123,7 +123,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index.html index 418d8e00683..4a75ef82673 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index.html @@ -126,7 +126,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index.html index 9dc0b83aa3d..99f16ee5834 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index.html index 7ae627b8b88..f7caaada98b 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index.html index 9fb6b35add7..4da95af2506 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index.html index 46f57276469..37254c88783 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index.html @@ -116,7 +116,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index.html index 975c011804f..7872b849d93 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html index 584c269a28c..5adb0cbccb0 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html index 2d049d47b93..0b62195345c 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index.html @@ -126,7 +126,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index.html index b89a323a01a..238c791d831 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index.html @@ -137,7 +137,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/index.html index 2c717e6d166..13c01ced8c9 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/index.html @@ -127,7 +127,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index.html index 85ddec622a4..3b8ac03ede4 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index.html index 357dd3d6c4c..cb6aa00a059 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html index bfc843922f7..efa84ac4a85 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index.html index be0d68743db..bdc4c1cca9b 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index.html index cd583f3088c..d9eb5b46086 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index.html index a1c66349842..bb2103dfdd7 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index.html index 66ee8e2e018..501085a8ddc 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index.html index 55c7406a0c0..8f89f2d3ba5 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index.html index 80f0228067e..55bed48e3f5 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index.html @@ -121,7 +121,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index.html index bc799b3d33b..cf7dca5e53d 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index.html index 31d2faf8bdf..e71da411566 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index.html @@ -139,7 +139,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index.html index d1375bb52b0..5892b65fff3 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index.html index 0510a5d4b9a..befb8ef842c 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index.html index 7e540610bdc..705222a5c3d 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index.html index fe24b7db7d2..61482a817c2 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index.html index 1f8620186f8..c4b354eaab4 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index.html index abd0eff74b1..ddcc46b37d0 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index.html index f11a5da8fab..202febf5a7b 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index.html index cd2c45a161a..dda6366f1f1 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index.html @@ -135,7 +135,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index.html index 0b8753c210f..1f10862920d 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index.html index 9aaa2f9b552..a8afd132409 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index.html @@ -117,7 +117,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index.html index 4fe18e73446..2f20f555107 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index.html index 20c7d1d87c6..5571dffcc34 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index.html @@ -125,7 +125,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/utility/index.html b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/utility/index.html index 901d121f42c..e18f791720a 100644 --- a/latest/autoapi/neural_compressor/tensorflow/quantization/utils/utility/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/quantization/utils/utility/index.html @@ -408,7 +408,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/utils/constants/index.html b/latest/autoapi/neural_compressor/tensorflow/utils/constants/index.html index f9e22cfcfe9..86421f4fe28 100644 --- a/latest/autoapi/neural_compressor/tensorflow/utils/constants/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/utils/constants/index.html @@ -106,7 +106,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/utils/data/index.html b/latest/autoapi/neural_compressor/tensorflow/utils/data/index.html index 657bf64fdfb..7f4bb915a92 100644 --- a/latest/autoapi/neural_compressor/tensorflow/utils/data/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/utils/data/index.html @@ -213,7 +213,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/utils/index.html b/latest/autoapi/neural_compressor/tensorflow/utils/index.html index f6817c449f1..82ce7d9c037 100644 --- a/latest/autoapi/neural_compressor/tensorflow/utils/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/utils/index.html @@ -118,7 +118,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/utils/model/index.html b/latest/autoapi/neural_compressor/tensorflow/utils/model/index.html index 3a7c78741ef..52e823eeebe 100644 --- a/latest/autoapi/neural_compressor/tensorflow/utils/model/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/utils/model/index.html @@ -134,7 +134,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/utils/model_wrappers/index.html b/latest/autoapi/neural_compressor/tensorflow/utils/model_wrappers/index.html index 5ba5667af9a..71bde353057 100644 --- a/latest/autoapi/neural_compressor/tensorflow/utils/model_wrappers/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/utils/model_wrappers/index.html @@ -520,7 +520,7 @@

diff --git a/latest/autoapi/neural_compressor/tensorflow/utils/utility/index.html b/latest/autoapi/neural_compressor/tensorflow/utils/utility/index.html index 43c347fcb60..6a03ffaefac 100644 --- a/latest/autoapi/neural_compressor/tensorflow/utils/utility/index.html +++ b/latest/autoapi/neural_compressor/tensorflow/utils/utility/index.html @@ -331,7 +331,7 @@

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/base_algorithm/index.html b/latest/autoapi/neural_compressor/torch/algorithms/base_algorithm/index.html index decd17726af..b9fe4827523 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/base_algorithm/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/base_algorithm/index.html @@ -135,7 +135,7 @@

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index.html b/latest/autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index.html index 600b96b4dd5..0f74fad24e8 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index.html @@ -106,7 +106,7 @@

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/index.html b/latest/autoapi/neural_compressor/torch/algorithms/index.html index 813804cf267..3bbe2c6c20a 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/index.html @@ -128,7 +128,7 @@

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/index.html b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/index.html index b9e3729d2e5..e2d2fd64485 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/index.html @@ -116,7 +116,7 @@

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/load/index.html b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/load/index.html index c850f2b4b5a..8914f06dd1d 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/load/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/load/index.html @@ -212,7 +212,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index.html b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index.html index f0beb70dbf3..665f4fc0bcc 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index.html @@ -166,7 +166,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index.html b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index.html index 9c544e9ebcc..34f17e7f174 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index.html @@ -308,7 +308,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index.html b/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index.html index b488acbcea9..0e447a6f6f7 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/index.html b/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/index.html index 6ae4a903bb5..3dfd613c491 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/index.html @@ -115,7 +115,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index.html b/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index.html index eee2fada0d0..f7fd5a7296d 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/index.html b/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/index.html index 0f6c26bc227..967ff01f8e2 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/index.html @@ -115,7 +115,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index.html b/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index.html index 3a25323f4d7..3dbfb3f45f8 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index.html @@ -134,7 +134,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index.html b/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index.html index 76764c096e5..76de82a92f5 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index.html @@ -173,7 +173,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index.html b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index.html index dbeaae209a3..d3cd3923ec7 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index.html b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index.html index 6f763b7b51e..fc1630501b8 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index.html @@ -274,7 +274,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/index.html b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/index.html index a1461aefdb0..a4002c371f1 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/index.html @@ -117,7 +117,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index.html b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index.html index 807f6502190..4eba86580d8 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index.html @@ -154,7 +154,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index.html b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index.html index b0d7dd81ddc..fb427eb195c 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index.html @@ -162,7 +162,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/index.html b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/index.html index d2fc8938c89..8b51c26c028 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/index.html @@ -116,7 +116,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index.html b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index.html index eb1f4051e94..ded65ca2ff7 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index.html @@ -140,7 +140,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index.html b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index.html index fc7c4d5c2e2..35dcaa232a5 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index.html @@ -159,7 +159,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index.html b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index.html index 965aa583b63..6e6acf7a5d6 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index.html @@ -684,7 +684,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/index.html b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/index.html index 47f4fe93e87..bf802e93f88 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/index.html @@ -116,7 +116,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index.html b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index.html index 4f47549dded..d74e2800b6d 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index.html @@ -150,7 +150,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index.html b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index.html index 7261fb14c48..0b4d46d1d29 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/utility/index.html b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/utility/index.html index 918ca5c8186..9ccbc0874a9 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/static_quant/utility/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/static_quant/utility/index.html @@ -345,7 +345,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index.html index 1258aed0b84..755b7aa5f67 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index.html @@ -162,7 +162,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/awq/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/awq/index.html index 5374d3c6a5b..412b7e22afb 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/awq/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/awq/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index.html index 8f724ff1d85..e6b558fc035 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index.html @@ -239,7 +239,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index.html index 45b28a5085f..b2c1a16bc69 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index.html index 6f7f8d82f62..4feffcad79c 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index.html @@ -143,7 +143,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index.html index ecba325680a..57b6f72a38f 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index.html @@ -134,7 +134,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index.html index 4a1d1cafdf8..19ed8ab243d 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index.html @@ -119,7 +119,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index.html index d46251ee473..97eaff27d4a 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index.html @@ -146,7 +146,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index.html index 37ecf290efc..43765a8e772 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index.html @@ -201,7 +201,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index.html index f56eba9267d..85c0e3240b7 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index.html @@ -203,7 +203,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/index.html index c00dfda22a2..fa4fc5f521d 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/index.html @@ -129,7 +129,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/modules/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/modules/index.html index b307ae65cd8..98c4c1fd640 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/modules/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/modules/index.html @@ -188,7 +188,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index.html index 92a9e8c724d..41d695bda19 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index.html index 8a71c19fcb5..6fba282d947 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index.html @@ -201,7 +201,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/teq/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/teq/index.html index d63cb4ee19b..0db5628744d 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/teq/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/teq/index.html @@ -134,7 +134,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/utility/index.html b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/utility/index.html index 28f974b58f0..b4524e145f9 100644 --- a/latest/autoapi/neural_compressor/torch/algorithms/weight_only/utility/index.html +++ b/latest/autoapi/neural_compressor/torch/algorithms/weight_only/utility/index.html @@ -558,7 +558,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/export/index.html b/latest/autoapi/neural_compressor/torch/export/index.html index 564fd546554..bc1f003e307 100644 --- a/latest/autoapi/neural_compressor/torch/export/index.html +++ b/latest/autoapi/neural_compressor/torch/export/index.html @@ -114,7 +114,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/export/pt2e_export/index.html b/latest/autoapi/neural_compressor/torch/export/pt2e_export/index.html index 5071315c3c1..88b194041f1 100644 --- a/latest/autoapi/neural_compressor/torch/export/pt2e_export/index.html +++ b/latest/autoapi/neural_compressor/torch/export/pt2e_export/index.html @@ -167,7 +167,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/index.html b/latest/autoapi/neural_compressor/torch/index.html index ce26b92589a..d84b70c94a4 100644 --- a/latest/autoapi/neural_compressor/torch/index.html +++ b/latest/autoapi/neural_compressor/torch/index.html @@ -117,7 +117,7 @@

SubpackagesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/quantization/algorithm_entry/index.html b/latest/autoapi/neural_compressor/torch/quantization/algorithm_entry/index.html index d59cc548e04..1312bc5f06b 100644 --- a/latest/autoapi/neural_compressor/torch/quantization/algorithm_entry/index.html +++ b/latest/autoapi/neural_compressor/torch/quantization/algorithm_entry/index.html @@ -413,7 +413,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/quantization/autotune/index.html b/latest/autoapi/neural_compressor/torch/quantization/autotune/index.html index dde2e13db70..d1721487143 100644 --- a/latest/autoapi/neural_compressor/torch/quantization/autotune/index.html +++ b/latest/autoapi/neural_compressor/torch/quantization/autotune/index.html @@ -175,7 +175,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/quantization/config/index.html b/latest/autoapi/neural_compressor/torch/quantization/config/index.html index 9e551c1367b..d455ad2c433 100644 --- a/latest/autoapi/neural_compressor/torch/quantization/config/index.html +++ b/latest/autoapi/neural_compressor/torch/quantization/config/index.html @@ -521,7 +521,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/quantization/index.html b/latest/autoapi/neural_compressor/torch/quantization/index.html index bd0434f8ee4..8742d65d95a 100644 --- a/latest/autoapi/neural_compressor/torch/quantization/index.html +++ b/latest/autoapi/neural_compressor/torch/quantization/index.html @@ -116,7 +116,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/quantization/load_entry/index.html b/latest/autoapi/neural_compressor/torch/quantization/load_entry/index.html index 731ac65ff65..9da56cae881 100644 --- a/latest/autoapi/neural_compressor/torch/quantization/load_entry/index.html +++ b/latest/autoapi/neural_compressor/torch/quantization/load_entry/index.html @@ -173,7 +173,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/quantization/quantize/index.html b/latest/autoapi/neural_compressor/torch/quantization/quantize/index.html index 89e2a27cba2..10639e8f96e 100644 --- a/latest/autoapi/neural_compressor/torch/quantization/quantize/index.html +++ b/latest/autoapi/neural_compressor/torch/quantization/quantize/index.html @@ -215,7 +215,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/utils/auto_accelerator/index.html b/latest/autoapi/neural_compressor/torch/utils/auto_accelerator/index.html index 8389027d5e2..ded8e107cb9 100644 --- a/latest/autoapi/neural_compressor/torch/utils/auto_accelerator/index.html +++ b/latest/autoapi/neural_compressor/torch/utils/auto_accelerator/index.html @@ -215,7 +215,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/utils/bit_packer/index.html b/latest/autoapi/neural_compressor/torch/utils/bit_packer/index.html index b0656de6e69..e2162b9a72b 100644 --- a/latest/autoapi/neural_compressor/torch/utils/bit_packer/index.html +++ b/latest/autoapi/neural_compressor/torch/utils/bit_packer/index.html @@ -233,7 +233,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/utils/constants/index.html b/latest/autoapi/neural_compressor/torch/utils/constants/index.html index 6f7610c948e..eb6ff746bf8 100644 --- a/latest/autoapi/neural_compressor/torch/utils/constants/index.html +++ b/latest/autoapi/neural_compressor/torch/utils/constants/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/utils/environ/index.html b/latest/autoapi/neural_compressor/torch/utils/environ/index.html index e9156f0ee3b..aa864d64d0c 100644 --- a/latest/autoapi/neural_compressor/torch/utils/environ/index.html +++ b/latest/autoapi/neural_compressor/torch/utils/environ/index.html @@ -202,7 +202,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/utils/index.html b/latest/autoapi/neural_compressor/torch/utils/index.html index 97896ccdd95..a37731825fe 100644 --- a/latest/autoapi/neural_compressor/torch/utils/index.html +++ b/latest/autoapi/neural_compressor/torch/utils/index.html @@ -118,7 +118,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/torch/utils/utility/index.html b/latest/autoapi/neural_compressor/torch/utils/utility/index.html index 9ba35817312..33b8c2c52f2 100644 --- a/latest/autoapi/neural_compressor/torch/utils/utility/index.html +++ b/latest/autoapi/neural_compressor/torch/utils/utility/index.html @@ -94,47 +94,94 @@

Functions

is_optimum_habana_available()
    Checks if the Optimum Habana module is available for use with the transformers library.

register_algo(name)
    Decorator function to register algorithms in the algos_mapping dictionary.

fetch_module(model, op_name)
    Get module with a given op name.

set_module(model, op_name, new_module)
    Set module with a given op name.

get_model_info(→ List[Tuple[str, str]])
    Get model info according to white_module_list.

get_double_quant_config_dict([double_quant_type])
    Query config dict of double_quant according to double_quant_type.

get_quantizer(model, quantizer_cls[, quant_config])
    Get the quantizer.

postprocess_model(model, mode, quantizer)
    Process quantizer attribute of model according to current phase.

dump_model_op_stats(mode, tune_cfg)
    Dump quantizable ops stats of model to user.

get_model_device(model)
    Get the device.

get_processor_type_from_user_config([user_processor_type])
    Get the processor type.

dowload_hf_model(repo_id[, cache_dir, repo_type, revision])
    Download a Hugging Face model from the HF hub.

load_empty_model(pretrained_model_name_or_path[, cls])
    Load an empty model.

get_module(module, key)
    Get module from model by key name.

get_layer_names_in_block(model[, supported_types, ...])
    Retrieves the names of layers within each block of the model.

to_dtype(input[, dtype])
    Moves input data to the specified data type.

to_device(input[, device])
    Moves input data to the specified device.

get_block_names(model)
    Get the block names for transformers-like networks.

validate_modules(module_names)
    Test a list of modules’ validity.

get_multimodal_block_names(model[, quant_vision])
    Get the multimodal model block names for transformers-like networks.

detect_device([device])
    Detects the device to use for model execution (GPU, HPU, or CPU).

run_fn_for_vlm_autoround(model, dataloader[, seqlen, ...])
    Runs a model on a provided dataset with automatic device detection for vision-language models.

Module Contents

+
+
+neural_compressor.torch.utils.utility.is_optimum_habana_available()[source]
+

Checks if the Optimum Habana module is available for use with the transformers library.

+

This function checks two conditions:
+1. If the optimum package is available using transformers.utils.import_utils.is_optimum_available.
+2. If the optimum.habana module can be found using importlib.util.find_spec.

+
+
Returns:
+

True if Optimum Habana is available, False otherwise.

+
+
Return type:
+

bool

+
+
+
+
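A minimal usage sketch for the helper above; the import path follows the module documented on this page, and the print statements are purely illustrative:

    from neural_compressor.torch.utils.utility import is_optimum_habana_available

    if is_optimum_habana_available():
        # optimum and optimum.habana are importable, so Gaudi/HPU-specific paths can be enabled
        print("optimum-habana detected")
    else:
        print("optimum-habana not found; staying on CPU/CUDA code paths")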
neural_compressor.torch.utils.utility.register_algo(name)[source]
@@ -317,6 +364,132 @@

Module Contents +
+neural_compressor.torch.utils.utility.get_module(module, key)[source]
+

Get module from model by key name.

+
+
Parameters:
+
    +
  • module (torch.nn.Module) – original model

  • +
  • key (str) – module name to be replaced

  • +
+
+
+

+ +
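A short sketch of fetching a submodule with get_module, e.g. before swapping it out with set_module. The toy Sequential model is illustrative, and the assumption that the helper returns the submodule is taken from the summary above:

    import torch
    from neural_compressor.torch.utils.utility import get_module

    # toy model; submodule keys follow torch's named_modules() naming ("0", "1", ...)
    model = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.Linear(16, 2))
    head = get_module(model, "1")  # expected to be the second Linear layer
    print(type(head))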
+
+neural_compressor.torch.utils.utility.get_layer_names_in_block(model, supported_types=[torch.nn.Linear, transformers.modeling_utils.Conv1D], quant_block_list=None)[source]
+

Retrieves the names of layers within each block of the model.

+
+
Returns:
+

+
A list of strings, where each string is the name of a layer

within a block of the model.

+
+
+

+
+
Return type:
+

list

+
+
+
+ +
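A hedged sketch of the call above, assuming the transformers package is installed and a small causal-LM checkpoint (facebook/opt-125m, chosen purely for illustration) can be downloaded:

    from transformers import AutoModelForCausalLM
    from neural_compressor.torch.utils.utility import get_layer_names_in_block

    model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    # default supported_types are torch.nn.Linear and transformers Conv1D, per the signature above
    layer_names = get_layer_names_in_block(model)
    print(layer_names[:4])  # fully qualified names of layers found inside the blocks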
+
+neural_compressor.torch.utils.utility.to_dtype(input, dtype=torch.float32)[source]
+

Moves input data to the specified data type.

+

Args:
+input: The input data to be moved.
+dtype: The target data type.

+

Returns:
+The input data in the specified data type.

+
+ +
+
+neural_compressor.torch.utils.utility.to_device(input, device=torch.device('cpu'))[source]
+

Moves input data to the specified device.

+

Args:
+input: The input data to be moved.
+device: The target device.

+

Returns:
+The input data on the specified device.

+
+ +
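A minimal sketch covering both to_dtype and to_device documented above; a plain tensor is used here for simplicity (whether nested containers are also handled is not stated in this summary):

    import torch
    from neural_compressor.torch.utils.utility import to_device, to_dtype

    x = torch.randn(4, 4)
    x_fp16 = to_dtype(x, dtype=torch.float16)               # cast to the target dtype
    x_cpu = to_device(x_fp16, device=torch.device("cpu"))   # move to the target device
    print(x_fp16.dtype, x_cpu.device)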
+
+neural_compressor.torch.utils.utility.get_block_names(model)[source]
+

Get the block names for transformers-like networks.

+

Args:
+model: The model.

+

Returns:
+block_names: A list whose elements are lists of each block’s layer names.

+
+ +
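A sketch of get_block_names on the same illustrative checkpoint used earlier; the expectation that each element groups the layer names of one transformer block follows the return description above:

    from transformers import AutoModelForCausalLM
    from neural_compressor.torch.utils.utility import get_block_names

    model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
    block_names = get_block_names(model)   # list of lists of block layer names
    print(len(block_names), block_names[0][:2])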
+
+neural_compressor.torch.utils.utility.validate_modules(module_names)[source]
+

Test a list of modules’ validity.

+

Args:
+modules (list of str): List of strings to be validated.

+

Returns:
+bool: True if all modules have equal length or are not dependent, otherwise False.

+
+ +
+
+neural_compressor.torch.utils.utility.get_multimodal_block_names(model, quant_vision=False)[source]
+

Get the multimodal model block names for transformers-like networks.

+

Args:
+model: The model.

+

Returns:
+block_names: A list whose elements are lists of each block’s layer names.

+
+ +
+
+neural_compressor.torch.utils.utility.detect_device(device=None)[source]
+

Detects the device to use for model execution (GPU, HPU, or CPU).

+
+
Parameters:
+

device (str, int, torch.device, optional) –

    +
  • If a string (‘cuda’, ‘cpu’, or ‘hpu’) or torch.device is provided, that device is selected.

  • +
  • If an integer is provided, it treats it as a GPU device index.

  • +
  • If None or ‘auto’, it automatically selects ‘cuda’ if available, ‘hpu’ if Habana is available, or falls back to ‘cpu’.

  • +
+

+
+
Returns:
+

The selected device in string format (‘cuda:X’, ‘hpu’, or ‘cpu’).

+
+
Return type:
+

str

+
+
+
+ +
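A minimal sketch of detect_device; the returned string can be handed straight to torch (the ‘hpu’ case assumes the Habana PyTorch bridge is importable in that environment):

    import torch
    from neural_compressor.torch.utils.utility import detect_device

    device_str = detect_device()   # e.g. 'cuda:0', 'hpu', or 'cpu'
    x = torch.randn(2, 4).to(torch.device(device_str))
    print(device_str, x.device)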
+
+neural_compressor.torch.utils.utility.run_fn_for_vlm_autoround(model, dataloader, seqlen=512, nsamples=512)[source]
+

Runs a model on a provided dataset with automatic device detection for vision-language models.

+
+
Parameters:
+
    +
  • model – The model to run.

  • +
  • dataloader – A PyTorch dataloader providing the input data for the model.

  • +
  • seqlen (int, optional) – The minimum sequence length of input data to process. Defaults to 512.

  • +
  • nsamples (int, optional) – The number of samples to process before stopping. Defaults to 512.

  • +
+
+
Returns:
+

None

+
+
+
+
@@ -334,7 +507,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/training/index.html b/latest/autoapi/neural_compressor/training/index.html index 28baabe9976..67215558f49 100644 --- a/latest/autoapi/neural_compressor/training/index.html +++ b/latest/autoapi/neural_compressor/training/index.html @@ -304,7 +304,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/transformers/quantization/utils/index.html b/latest/autoapi/neural_compressor/transformers/quantization/utils/index.html index fcc23dbff54..214ec137015 100644 --- a/latest/autoapi/neural_compressor/transformers/quantization/utils/index.html +++ b/latest/autoapi/neural_compressor/transformers/quantization/utils/index.html @@ -106,7 +106,7 @@ Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/transformers/utils/index.html b/latest/autoapi/neural_compressor/transformers/utils/index.html index a553348801c..1a1d26323c2 100644 --- a/latest/autoapi/neural_compressor/transformers/utils/index.html +++ b/latest/autoapi/neural_compressor/transformers/utils/index.html @@ -114,7 +114,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/transformers/utils/quantization_config/index.html b/latest/autoapi/neural_compressor/transformers/utils/quantization_config/index.html index 34523e8b83a..de4b05ac3ee 100644 --- a/latest/autoapi/neural_compressor/transformers/utils/quantization_config/index.html +++ b/latest/autoapi/neural_compressor/transformers/utils/quantization_config/index.html @@ -187,7 +187,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/collect_layer_histogram/index.html b/latest/autoapi/neural_compressor/utils/collect_layer_histogram/index.html index 92ce37df686..e6cb9ca53e1 100644 --- a/latest/autoapi/neural_compressor/utils/collect_layer_histogram/index.html +++ b/latest/autoapi/neural_compressor/utils/collect_layer_histogram/index.html @@ -128,7 +128,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/constant/index.html b/latest/autoapi/neural_compressor/utils/constant/index.html index 7c80e22514b..17a562536cf 100644 --- a/latest/autoapi/neural_compressor/utils/constant/index.html +++ b/latest/autoapi/neural_compressor/utils/constant/index.html @@ -106,7 +106,7 @@ Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/create_obj_from_config/index.html b/latest/autoapi/neural_compressor/utils/create_obj_from_config/index.html index c1ae57c7628..2d59cc03cc5 100644 --- a/latest/autoapi/neural_compressor/utils/create_obj_from_config/index.html +++ b/latest/autoapi/neural_compressor/utils/create_obj_from_config/index.html @@ -242,7 +242,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/export/index.html b/latest/autoapi/neural_compressor/utils/export/index.html index 732ebdce54a..361318f3e0a 100644 --- a/latest/autoapi/neural_compressor/utils/export/index.html +++ b/latest/autoapi/neural_compressor/utils/export/index.html @@ -116,7 +116,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/export/qlinear2qdq/index.html b/latest/autoapi/neural_compressor/utils/export/qlinear2qdq/index.html index d98ae696d4e..a3a5932441f 100644 --- a/latest/autoapi/neural_compressor/utils/export/qlinear2qdq/index.html +++ b/latest/autoapi/neural_compressor/utils/export/qlinear2qdq/index.html @@ -147,7 +147,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/export/tf2onnx/index.html b/latest/autoapi/neural_compressor/utils/export/tf2onnx/index.html index 077b4a1a4d5..3661dbd1ebe 100644 --- a/latest/autoapi/neural_compressor/utils/export/tf2onnx/index.html +++ b/latest/autoapi/neural_compressor/utils/export/tf2onnx/index.html @@ -158,7 +158,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/export/torch2onnx/index.html b/latest/autoapi/neural_compressor/utils/export/torch2onnx/index.html index e7cdb41ef31..986044760b6 100644 --- a/latest/autoapi/neural_compressor/utils/export/torch2onnx/index.html +++ b/latest/autoapi/neural_compressor/utils/export/torch2onnx/index.html @@ -268,7 +268,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/index.html b/latest/autoapi/neural_compressor/utils/index.html index 23731e51a01..f576605b9b7 100644 --- a/latest/autoapi/neural_compressor/utils/index.html +++ b/latest/autoapi/neural_compressor/utils/index.html @@ -131,7 +131,7 @@

SubmodulesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/kl_divergence/index.html b/latest/autoapi/neural_compressor/utils/kl_divergence/index.html index ada6789a744..4d2b8257b4d 100644 --- a/latest/autoapi/neural_compressor/utils/kl_divergence/index.html +++ b/latest/autoapi/neural_compressor/utils/kl_divergence/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/load_huggingface/index.html b/latest/autoapi/neural_compressor/utils/load_huggingface/index.html index d3d00ddc034..98a9104b740 100644 --- a/latest/autoapi/neural_compressor/utils/load_huggingface/index.html +++ b/latest/autoapi/neural_compressor/utils/load_huggingface/index.html @@ -168,7 +168,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/logger/index.html b/latest/autoapi/neural_compressor/utils/logger/index.html index 55607ae8717..1b26660706c 100644 --- a/latest/autoapi/neural_compressor/utils/logger/index.html +++ b/latest/autoapi/neural_compressor/utils/logger/index.html @@ -195,7 +195,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/options/index.html b/latest/autoapi/neural_compressor/utils/options/index.html index df526c99a56..ac4abd5f716 100644 --- a/latest/autoapi/neural_compressor/utils/options/index.html +++ b/latest/autoapi/neural_compressor/utils/options/index.html @@ -125,7 +125,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/pytorch/index.html b/latest/autoapi/neural_compressor/utils/pytorch/index.html index 9ea22783692..0f8c0e62216 100644 --- a/latest/autoapi/neural_compressor/utils/pytorch/index.html +++ b/latest/autoapi/neural_compressor/utils/pytorch/index.html @@ -214,7 +214,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/utility/index.html b/latest/autoapi/neural_compressor/utils/utility/index.html index 03ef3d65374..553f45e9cb4 100644 --- a/latest/autoapi/neural_compressor/utils/utility/index.html +++ b/latest/autoapi/neural_compressor/utils/utility/index.html @@ -838,7 +838,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/utils/weights_details/index.html b/latest/autoapi/neural_compressor/utils/weights_details/index.html index 12d4340a95b..6a6fadbf7a0 100644 --- a/latest/autoapi/neural_compressor/utils/weights_details/index.html +++ b/latest/autoapi/neural_compressor/utils/weights_details/index.html @@ -134,7 +134,7 @@

Module ContentsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/autoapi/neural_compressor/version/index.html b/latest/autoapi/neural_compressor/version/index.html index 0740c947d16..599c951aba1 100644 --- a/latest/autoapi/neural_compressor/version/index.html +++ b/latest/autoapi/neural_compressor/version/index.html @@ -106,7 +106,7 @@ Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/build_docs/source/index.html b/latest/docs/build_docs/source/index.html index 29c3926b662..a330c977fe8 100644 --- a/latest/docs/build_docs/source/index.html +++ b/latest/docs/build_docs/source/index.html @@ -114,7 +114,7 @@

Sections Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/2x_user_guide.html b/latest/docs/source/2x_user_guide.html index 70a790e8e50..5432383ba8c 100644 --- a/latest/docs/source/2x_user_guide.html +++ b/latest/docs/source/2x_user_guide.html @@ -176,7 +176,7 @@

Advanced TopicsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PT_DynamicQuant.html b/latest/docs/source/3x/PT_DynamicQuant.html index 2d436453677..73d86a34de9 100644 --- a/latest/docs/source/3x/PT_DynamicQuant.html +++ b/latest/docs/source/3x/PT_DynamicQuant.html @@ -146,7 +146,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PT_FP8Quant.html b/latest/docs/source/3x/PT_FP8Quant.html index 4e27b494300..f30a608ad47 100644 --- a/latest/docs/source/3x/PT_FP8Quant.html +++ b/latest/docs/source/3x/PT_FP8Quant.html @@ -218,7 +218,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PT_MXQuant.html b/latest/docs/source/3x/PT_MXQuant.html index 34701ad332f..7d939de15fe 100644 --- a/latest/docs/source/3x/PT_MXQuant.html +++ b/latest/docs/source/3x/PT_MXQuant.html @@ -222,7 +222,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PT_MixedPrecision.html b/latest/docs/source/3x/PT_MixedPrecision.html index beb284cb369..990bf449a52 100644 --- a/latest/docs/source/3x/PT_MixedPrecision.html +++ b/latest/docs/source/3x/PT_MixedPrecision.html @@ -218,7 +218,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PT_SmoothQuant.html b/latest/docs/source/3x/PT_SmoothQuant.html index 53b8d781fe0..52fb5609e76 100644 --- a/latest/docs/source/3x/PT_SmoothQuant.html +++ b/latest/docs/source/3x/PT_SmoothQuant.html @@ -402,7 +402,7 @@

Supported Framework MatrixSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PT_StaticQuant.html b/latest/docs/source/3x/PT_StaticQuant.html index 01bf3fb3885..ef31ea0d7d9 100644 --- a/latest/docs/source/3x/PT_StaticQuant.html +++ b/latest/docs/source/3x/PT_StaticQuant.html @@ -208,7 +208,7 @@

Model Examples with PT2ESphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PT_WeightOnlyQuant.html b/latest/docs/source/3x/PT_WeightOnlyQuant.html index c31bd3bebd7..8cf5e0c1a88 100644 --- a/latest/docs/source/3x/PT_WeightOnlyQuant.html +++ b/latest/docs/source/3x/PT_WeightOnlyQuant.html @@ -737,7 +737,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/PyTorch.html b/latest/docs/source/3x/PyTorch.html index d90bed5386d..1c0dd7ab8f6 100644 --- a/latest/docs/source/3x/PyTorch.html +++ b/latest/docs/source/3x/PyTorch.html @@ -382,7 +382,7 @@

Common ProblemsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/TF_Quant.html b/latest/docs/source/3x/TF_Quant.html index 9e4eeac46d8..fc8c5c7ae61 100644 --- a/latest/docs/source/3x/TF_Quant.html +++ b/latest/docs/source/3x/TF_Quant.html @@ -222,7 +222,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/TF_SQ.html b/latest/docs/source/3x/TF_SQ.html index c8bb1dd72b5..47541ead64f 100644 --- a/latest/docs/source/3x/TF_SQ.html +++ b/latest/docs/source/3x/TF_SQ.html @@ -162,7 +162,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/TensorFlow.html b/latest/docs/source/3x/TensorFlow.html index 256141b0bbc..df3294d32c5 100644 --- a/latest/docs/source/3x/TensorFlow.html +++ b/latest/docs/source/3x/TensorFlow.html @@ -331,7 +331,7 @@

Backend and DeviceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/autotune.html b/latest/docs/source/3x/autotune.html index 03820033aae..071b7227e16 100644 --- a/latest/docs/source/3x/autotune.html +++ b/latest/docs/source/3x/autotune.html @@ -187,7 +187,7 @@

Working with Tensorflow ModelSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/benchmark.html b/latest/docs/source/3x/benchmark.html index 242f291bbed..81d53a14c46 100644 --- a/latest/docs/source/3x/benchmark.html +++ b/latest/docs/source/3x/benchmark.html @@ -202,7 +202,7 @@

Demo usageSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/client_quant.html b/latest/docs/source/3x/client_quant.html index 5b4b8853b6b..68ebfe905da 100644 --- a/latest/docs/source/3x/client_quant.html +++ b/latest/docs/source/3x/client_quant.html @@ -140,7 +140,7 @@

Get StartedSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/design.html b/latest/docs/source/3x/design.html index 1fd4ec77574..f8954f02c24 100644 --- a/latest/docs/source/3x/design.html +++ b/latest/docs/source/3x/design.html @@ -116,7 +116,7 @@

WorkflowsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/gaudi_version_map.html b/latest/docs/source/3x/gaudi_version_map.html index acdfacc2c01..b2d78e9623c 100644 --- a/latest/docs/source/3x/gaudi_version_map.html +++ b/latest/docs/source/3x/gaudi_version_map.html @@ -124,7 +124,7 @@

Version mapping between Intel Neural Compressor to Gaudi Software StackSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/llm_recipes.html b/latest/docs/source/3x/llm_recipes.html index fa3d06acbcb..bfd3d08832c 100644 --- a/latest/docs/source/3x/llm_recipes.html +++ b/latest/docs/source/3x/llm_recipes.html @@ -102,7 +102,7 @@ Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/quantization.html b/latest/docs/source/3x/quantization.html index 6664ccc08ae..6a97d53df68 100644 --- a/latest/docs/source/3x/quantization.html +++ b/latest/docs/source/3x/quantization.html @@ -435,7 +435,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/3x/transformers_like_api.html b/latest/docs/source/3x/transformers_like_api.html index b0760e335a5..e426ab23f41 100644 --- a/latest/docs/source/3x/transformers_like_api.html +++ b/latest/docs/source/3x/transformers_like_api.html @@ -348,7 +348,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/CODE_OF_CONDUCT.html b/latest/docs/source/CODE_OF_CONDUCT.html index 1b299c73913..f28ed83fe45 100644 --- a/latest/docs/source/CODE_OF_CONDUCT.html +++ b/latest/docs/source/CODE_OF_CONDUCT.html @@ -182,7 +182,7 @@

AttributionSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/CONTRIBUTING.html b/latest/docs/source/CONTRIBUTING.html index 4a62f4e14a8..70892e3f5ab 100644 --- a/latest/docs/source/CONTRIBUTING.html +++ b/latest/docs/source/CONTRIBUTING.html @@ -175,7 +175,7 @@

Contributor Covenant Code of ConductSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/FX.html b/latest/docs/source/FX.html index b9096c138fa..add6c367335 100644 --- a/latest/docs/source/FX.html +++ b/latest/docs/source/FX.html @@ -252,7 +252,7 @@

Static Quantization & Quantization Aware TrainingSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/SECURITY.html b/latest/docs/source/SECURITY.html index 59950ba095b..336bcbdb608 100644 --- a/latest/docs/source/SECURITY.html +++ b/latest/docs/source/SECURITY.html @@ -117,7 +117,7 @@

Report a VulnerabilitySphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/Welcome.html b/latest/docs/source/Welcome.html index 6bdb24a8a3f..5e021d1b24e 100644 --- a/latest/docs/source/Welcome.html +++ b/latest/docs/source/Welcome.html @@ -336,7 +336,7 @@

CommunicationSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/adaptor.html b/latest/docs/source/adaptor.html index e45a2e2c443..6a0970f8ce7 100644 --- a/latest/docs/source/adaptor.html +++ b/latest/docs/source/adaptor.html @@ -273,7 +273,7 @@

Implement ONNXRTAdaptor ClassSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/add_new_adaptor.html b/latest/docs/source/add_new_adaptor.html index 3fda50a0db0..3f754d28a4f 100644 --- a/latest/docs/source/add_new_adaptor.html +++ b/latest/docs/source/add_new_adaptor.html @@ -263,7 +263,7 @@

Calculate the data range and generate quantized modelSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/add_new_data_type.html b/latest/docs/source/add_new_data_type.html index 5777a130e2b..80155b77b06 100644 --- a/latest/docs/source/add_new_data_type.html +++ b/latest/docs/source/add_new_data_type.html @@ -264,7 +264,7 @@

Summary Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/adaptor.html b/latest/docs/source/api-doc/adaptor.html index 8ff4ff3a9db..c13816f51e1 100644 --- a/latest/docs/source/api-doc/adaptor.html +++ b/latest/docs/source/api-doc/adaptor.html @@ -112,7 +112,7 @@

Adaptor Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/adaptor/onnxrt.html b/latest/docs/source/api-doc/adaptor/onnxrt.html index 9f84284bfc5..dd9db03d477 100644 --- a/latest/docs/source/api-doc/adaptor/onnxrt.html +++ b/latest/docs/source/api-doc/adaptor/onnxrt.html @@ -118,7 +118,7 @@

ONNX RuntimeSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/adaptor/torch_utils.html b/latest/docs/source/api-doc/adaptor/torch_utils.html index 1f998044297..03bf5c0091f 100644 --- a/latest/docs/source/api-doc/adaptor/torch_utils.html +++ b/latest/docs/source/api-doc/adaptor/torch_utils.html @@ -122,7 +122,7 @@

Torch UtilsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/api_2.html b/latest/docs/source/api-doc/api_2.html index e2e569610fe..fbd852a5a2d 100644 --- a/latest/docs/source/api-doc/api_2.html +++ b/latest/docs/source/api-doc/api_2.html @@ -152,7 +152,7 @@

2.0 APISphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/api_3.html b/latest/docs/source/api-doc/api_3.html index 0cca67eb5f1..90ef7b30fef 100644 --- a/latest/docs/source/api-doc/api_3.html +++ b/latest/docs/source/api-doc/api_3.html @@ -146,7 +146,7 @@

3.0 APISphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/api_doc_example.html b/latest/docs/source/api-doc/api_doc_example.html index f6c70d0c023..77cbfe26442 100644 --- a/latest/docs/source/api-doc/api_doc_example.html +++ b/latest/docs/source/api-doc/api_doc_example.html @@ -135,7 +135,7 @@

API Document ExampleSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/apis.html b/latest/docs/source/api-doc/apis.html index 3906e0e9d29..f58a6b5967b 100644 --- a/latest/docs/source/api-doc/apis.html +++ b/latest/docs/source/api-doc/apis.html @@ -124,7 +124,7 @@

APIs< Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/benchmark.html b/latest/docs/source/api-doc/benchmark.html index e3db32cafc3..7389672c92b 100644 --- a/latest/docs/source/api-doc/benchmark.html +++ b/latest/docs/source/api-doc/benchmark.html @@ -132,7 +132,7 @@

BenchmarkSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/compression.html b/latest/docs/source/api-doc/compression.html index 301292c3577..ddae12d9699 100644 --- a/latest/docs/source/api-doc/compression.html +++ b/latest/docs/source/api-doc/compression.html @@ -139,7 +139,7 @@

CompressionSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/config.html b/latest/docs/source/api-doc/config.html index 070e2723d80..0006da5fdbe 100644 --- a/latest/docs/source/api-doc/config.html +++ b/latest/docs/source/api-doc/config.html @@ -135,7 +135,7 @@

Config< Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/mix_precision.html b/latest/docs/source/api-doc/mix_precision.html index 96c84cd1632..a87b3fde7b9 100644 --- a/latest/docs/source/api-doc/mix_precision.html +++ b/latest/docs/source/api-doc/mix_precision.html @@ -135,7 +135,7 @@

Mix PrecisionSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/model.html b/latest/docs/source/api-doc/model.html index 91903a0c7c2..067b1cec3fa 100644 --- a/latest/docs/source/api-doc/model.html +++ b/latest/docs/source/api-doc/model.html @@ -145,7 +145,7 @@

ModelSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/objective.html b/latest/docs/source/api-doc/objective.html index b94e380cff0..3e8cd629bf1 100644 --- a/latest/docs/source/api-doc/objective.html +++ b/latest/docs/source/api-doc/objective.html @@ -135,7 +135,7 @@

ObjectiveSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/quantization.html b/latest/docs/source/api-doc/quantization.html index 22c61702c5d..245f4660790 100644 --- a/latest/docs/source/api-doc/quantization.html +++ b/latest/docs/source/api-doc/quantization.html @@ -135,7 +135,7 @@

QuantizationSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/strategy.html b/latest/docs/source/api-doc/strategy.html index fd81edc8aaa..32d96b28c0e 100644 --- a/latest/docs/source/api-doc/strategy.html +++ b/latest/docs/source/api-doc/strategy.html @@ -145,7 +145,7 @@

Strategy Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/tf_quantization_autotune.html b/latest/docs/source/api-doc/tf_quantization_autotune.html index 1214b28ae54..1d9e7e11479 100644 --- a/latest/docs/source/api-doc/tf_quantization_autotune.html +++ b/latest/docs/source/api-doc/tf_quantization_autotune.html @@ -132,7 +132,7 @@

Tensorflow Quantization AutoTuneSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/tf_quantization_common.html b/latest/docs/source/api-doc/tf_quantization_common.html index f98222eda5c..f5f10b8e93c 100644 --- a/latest/docs/source/api-doc/tf_quantization_common.html +++ b/latest/docs/source/api-doc/tf_quantization_common.html @@ -132,7 +132,7 @@

Tensorflow Quantization Base APISphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/tf_quantization_config.html b/latest/docs/source/api-doc/tf_quantization_config.html index 353a9a3c620..7a0c289f92f 100644 --- a/latest/docs/source/api-doc/tf_quantization_config.html +++ b/latest/docs/source/api-doc/tf_quantization_config.html @@ -132,7 +132,7 @@

Tensorflow Quantization ConfigSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/torch_quantization_autotune.html b/latest/docs/source/api-doc/torch_quantization_autotune.html index 43ae205e4ea..19f03f8ddae 100644 --- a/latest/docs/source/api-doc/torch_quantization_autotune.html +++ b/latest/docs/source/api-doc/torch_quantization_autotune.html @@ -132,7 +132,7 @@

Pytorch Quantization AutoTuneSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/torch_quantization_common.html b/latest/docs/source/api-doc/torch_quantization_common.html index be0054b55d0..33a514fb26d 100644 --- a/latest/docs/source/api-doc/torch_quantization_common.html +++ b/latest/docs/source/api-doc/torch_quantization_common.html @@ -132,7 +132,7 @@

Pytorch Quantization Base APISphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/torch_quantization_config.html b/latest/docs/source/api-doc/torch_quantization_config.html index c5f73f8068e..8801de6a23e 100644 --- a/latest/docs/source/api-doc/torch_quantization_config.html +++ b/latest/docs/source/api-doc/torch_quantization_config.html @@ -132,7 +132,7 @@

Pytorch Quantization ConfigSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/api-doc/training.html b/latest/docs/source/api-doc/training.html index 163b5b9a177..9fc4c21455c 100644 --- a/latest/docs/source/api-doc/training.html +++ b/latest/docs/source/api-doc/training.html @@ -135,7 +135,7 @@

Training Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/benchmark.html b/latest/docs/source/benchmark.html index 053e715d9cc..cf738af1a00 100644 --- a/latest/docs/source/benchmark.html +++ b/latest/docs/source/benchmark.html @@ -163,7 +163,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/calibration.html b/latest/docs/source/calibration.html index 42cde10a1cd..5c7364d2b06 100644 --- a/latest/docs/source/calibration.html +++ b/latest/docs/source/calibration.html @@ -169,7 +169,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/coding_style.html b/latest/docs/source/coding_style.html index e411cf2f571..82476ccb8bb 100644 --- a/latest/docs/source/coding_style.html +++ b/latest/docs/source/coding_style.html @@ -351,7 +351,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/dataloader.html b/latest/docs/source/dataloader.html index 96cff45f541..df1f0ef7a36 100644 --- a/latest/docs/source/dataloader.html +++ b/latest/docs/source/dataloader.html @@ -258,7 +258,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/design.html b/latest/docs/source/design.html index c6cdfc12c66..1abfd207036 100644 --- a/latest/docs/source/design.html +++ b/latest/docs/source/design.html @@ -116,7 +116,7 @@

Workflow Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/distillation_quantization.html b/latest/docs/source/distillation_quantization.html index a0324782b7e..ca0410999a9 100644 --- a/latest/docs/source/distillation_quantization.html +++ b/latest/docs/source/distillation_quantization.html @@ -186,7 +186,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/distributed.html b/latest/docs/source/distributed.html index 73db8fdd668..fcc8d0e0a1e 100644 --- a/latest/docs/source/distributed.html +++ b/latest/docs/source/distributed.html @@ -344,7 +344,7 @@

TensorFlow Examples:Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/examples_readme.html b/latest/docs/source/examples_readme.html index bd5a3f08030..47c3b94ea13 100644 --- a/latest/docs/source/examples_readme.html +++ b/latest/docs/source/examples_readme.html @@ -128,7 +128,7 @@

Release DataSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/export.html b/latest/docs/source/export.html index de782eb91e0..920f13bcc75 100644 --- a/latest/docs/source/export.html +++ b/latest/docs/source/export.html @@ -325,7 +325,7 @@

Supported quantized opsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/faq.html b/latest/docs/source/faq.html index d4e125b2056..df9dc7a14e6 100644 --- a/latest/docs/source/faq.html +++ b/latest/docs/source/faq.html @@ -139,7 +139,7 @@

Issue 5: Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/framework_yaml.html b/latest/docs/source/framework_yaml.html index 321b5de1304..31c8fd3d80c 100644 --- a/latest/docs/source/framework_yaml.html +++ b/latest/docs/source/framework_yaml.html @@ -323,7 +323,7 @@

Get started with Framework YAML FilesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/get_started.html b/latest/docs/source/get_started.html index 7c633b0207a..3f8736118c0 100644 --- a/latest/docs/source/get_started.html +++ b/latest/docs/source/get_started.html @@ -201,7 +201,7 @@

Feature MatrixSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/incompatible_changes.html b/latest/docs/source/incompatible_changes.html index 0d7bea073c8..e7bff8434f3 100644 --- a/latest/docs/source/incompatible_changes.html +++ b/latest/docs/source/incompatible_changes.html @@ -143,7 +143,7 @@

Built-in transform/dataset/metric APIsSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/infrastructure.html b/latest/docs/source/infrastructure.html index a4f18838fb1..7b67066b426 100644 --- a/latest/docs/source/infrastructure.html +++ b/latest/docs/source/infrastructure.html @@ -283,7 +283,7 @@

Supported Feature MatrixSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/installation_guide.html b/latest/docs/source/installation_guide.html index f3cbf464b04..979aa34e3a7 100644 --- a/latest/docs/source/installation_guide.html +++ b/latest/docs/source/installation_guide.html @@ -327,7 +327,7 @@

Validated Software EnvironmentSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/legal_information.html b/latest/docs/source/legal_information.html index b1c836c4250..03009b9fe69 100644 --- a/latest/docs/source/legal_information.html +++ b/latest/docs/source/legal_information.html @@ -147,7 +147,7 @@

TrademarksSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/llm_recipes.html b/latest/docs/source/llm_recipes.html index ee1c67bd8aa..d9b6a09e339 100644 --- a/latest/docs/source/llm_recipes.html +++ b/latest/docs/source/llm_recipes.html @@ -470,7 +470,7 @@

Large Language Models AccuracySphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/metric.html b/latest/docs/source/metric.html index 87675937564..4b94d974831 100644 --- a/latest/docs/source/metric.html +++ b/latest/docs/source/metric.html @@ -466,7 +466,7 @@

Example Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/migration.html b/latest/docs/source/migration.html index d372ded75a0..402ec174d94 100644 --- a/latest/docs/source/migration.html +++ b/latest/docs/source/migration.html @@ -782,7 +782,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/mixed_precision.html b/latest/docs/source/mixed_precision.html index 1d9fe0b11da..3e81a735e19 100644 --- a/latest/docs/source/mixed_precision.html +++ b/latest/docs/source/mixed_precision.html @@ -296,7 +296,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/model.html b/latest/docs/source/model.html index 4c9382afa5b..4527abc1b9c 100644 --- a/latest/docs/source/model.html +++ b/latest/docs/source/model.html @@ -221,7 +221,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/mx_quantization.html b/latest/docs/source/mx_quantization.html index 58557d2cb18..2765cfbe446 100644 --- a/latest/docs/source/mx_quantization.html +++ b/latest/docs/source/mx_quantization.html @@ -242,7 +242,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/objective.html b/latest/docs/source/objective.html index 69b3e61eb2d..114403ded85 100644 --- a/latest/docs/source/objective.html +++ b/latest/docs/source/objective.html @@ -185,7 +185,7 @@

Example Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/orchestration.html b/latest/docs/source/orchestration.html index 19778c6b8ac..82a70f42feb 100644 --- a/latest/docs/source/orchestration.html +++ b/latest/docs/source/orchestration.html @@ -195,7 +195,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/pruning.html b/latest/docs/source/pruning.html index 55cb257eaf9..d8faa1ff11b 100644 --- a/latest/docs/source/pruning.html +++ b/latest/docs/source/pruning.html @@ -518,7 +518,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/publication_list.html b/latest/docs/source/publication_list.html index 7627792d311..ef4a217a4b8 100644 --- a/latest/docs/source/publication_list.html +++ b/latest/docs/source/publication_list.html @@ -215,7 +215,7 @@

2018 - 2020 (4)Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/quantization.html b/latest/docs/source/quantization.html index 0927bc0ce7d..f141e81c9dd 100644 --- a/latest/docs/source/quantization.html +++ b/latest/docs/source/quantization.html @@ -771,7 +771,7 @@

Examples Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/quantization_layer_wise.html b/latest/docs/source/quantization_layer_wise.html index e64b1686089..8c7911e612b 100644 --- a/latest/docs/source/quantization_layer_wise.html +++ b/latest/docs/source/quantization_layer_wise.html @@ -192,7 +192,7 @@

ONNX Runtime framework exampleSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/quantization_mixed_precision.html b/latest/docs/source/quantization_mixed_precision.html index 96f8be5ad33..6baaeab2d18 100644 --- a/latest/docs/source/quantization_mixed_precision.html +++ b/latest/docs/source/quantization_mixed_precision.html @@ -165,7 +165,7 @@

PyTorch Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/quantization_weight_only.html b/latest/docs/source/quantization_weight_only.html index 34ce01813f0..8bb7184f110 100644 --- a/latest/docs/source/quantization_weight_only.html +++ b/latest/docs/source/quantization_weight_only.html @@ -468,7 +468,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/releases_info.html b/latest/docs/source/releases_info.html index cdac1a3f68d..03b0dffcb1c 100644 --- a/latest/docs/source/releases_info.html +++ b/latest/docs/source/releases_info.html @@ -140,7 +140,7 @@

Incompatible ChangesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/sigopt_strategy.html b/latest/docs/source/sigopt_strategy.html index a421d778a14..15eb2e5e018 100644 --- a/latest/docs/source/sigopt_strategy.html +++ b/latest/docs/source/sigopt_strategy.html @@ -186,7 +186,7 @@

Performance Comparison of Different StrategiesSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/smooth_quant.html b/latest/docs/source/smooth_quant.html index 5232d724af9..963af173983 100644 --- a/latest/docs/source/smooth_quant.html +++ b/latest/docs/source/smooth_quant.html @@ -726,7 +726,7 @@

ReferenceSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/transform.html b/latest/docs/source/transform.html index 1339a2861a6..c6e3fc39be5 100644 --- a/latest/docs/source/transform.html +++ b/latest/docs/source/transform.html @@ -639,7 +639,7 @@

ONNXRT< Built with Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/tuning_strategies.html b/latest/docs/source/tuning_strategies.html index 2d8be716c48..fa236db21ee 100644 --- a/latest/docs/source/tuning_strategies.html +++ b/latest/docs/source/tuning_strategies.html @@ -583,7 +583,7 @@

Customize a New Tuning StrategySphinx using a theme provided by Read the Docs. - +

diff --git a/latest/docs/source/validated_model_list.html b/latest/docs/source/validated_model_list.html index 9213d47ad28..8948fb5c6b9 100644 --- a/latest/docs/source/validated_model_list.html +++ b/latest/docs/source/validated_model_list.html @@ -2530,7 +2530,7 @@

Validated ONNX QDQ INT8 Models on Multiple Hardware through ONNX RuntimeSphinx using a theme provided by Read the Docs. - +

diff --git a/latest/genindex.html b/latest/genindex.html index 20955a94107..9f16a31f740 100644 --- a/latest/genindex.html +++ b/latest/genindex.html @@ -719,6 +719,8 @@

D

  • (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer)
  • +
  • detect_device() (in module neural_compressor.torch.utils.utility) +
  • detect_processor_type_based_on_hw() (in module neural_compressor.common.utils.utility)
  • detection_type (neural_compressor.metric.coco_tools.COCOWrapper attribute) @@ -739,14 +741,14 @@

    D

  • device_synchronize() (in module neural_compressor.torch.utils.environ)
  • + + -
  • get_ipex_version() (in module neural_compressor.torch.utils.environ) +
  • +
  • get_layer_names_in_block() (in module neural_compressor.torch.utils.utility)
  • get_layers() (in module neural_compressor.compression.pruner.utils)
  • @@ -1473,6 +1479,8 @@

    G

  • (in module neural_compressor.torch.algorithms.smooth_quant.utility)
  • (in module neural_compressor.torch.algorithms.weight_only.utility) +
  • +
  • (in module neural_compressor.torch.utils.utility)
  • @@ -1486,6 +1494,8 @@

    G

  • get_mse_order_per_fp32() (in module neural_compressor.adaptor.torch_utils.util)
  • get_mse_order_per_int8() (in module neural_compressor.adaptor.torch_utils.util) +
  • +
  • get_multimodal_block_names() (in module neural_compressor.torch.utils.utility)
  • get_named_children() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils) @@ -1888,6 +1898,8 @@

    I

  • is_model_quantized() (in module neural_compressor.adaptor.mxnet_utils.util)
  • is_onnx_domain() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils) +
  • +
  • is_optimum_habana_available() (in module neural_compressor.torch.utils.utility)
  • is_package_available() (in module neural_compressor.torch.utils.environ)
  • @@ -7469,10 +7481,10 @@

    R

  • register_pruning() (in module neural_compressor.compression.pruner.pruning)
  • - - + - +
    • TensorflowSavedModelModel (class in neural_compressor.model.tensorflow_model)
    • +
    • validate_modules() (in module neural_compressor.torch.utils.utility) +
    • ValueInfo (class in neural_compressor.adaptor.ox_utils.util)
    • values_from_const() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn) @@ -8407,7 +8427,7 @@

      X

      Built with Sphinx using a theme provided by Read the Docs. - +

      diff --git a/latest/objects.inv b/latest/objects.inv index fd851c8350032259eac23b892916205569621106..f45a4a3de252ceac399b69b88e8a50b865010542 100644 GIT binary patch delta 29381 zcmV)dK&QXF?g5nV0g#V>II^hwet$);b?@tDoy_jjtJZmQ+UYFcwlh6dW$*jK1(A@% zHYbyyEY+{S7&w!X2p~X0wI8ZdqJ@YrA}|v}jEf9~u9rk;L)NfBj#B-#N%fDWU%YM*EWx0UAS%!eDis zgdz5b@U<7BJpbD#O&S%98${4@1eFiIpd2qo!CY`haCV;{A7_hV2!x**N*BZUjv~B# z-u#ii9@6Eq#4D3+0VIFsMthg~BWL=TWJ#h`5M$RvDOv?#KmwdE%9CM4u zZ$i8zjvwSC3swbT9%7k?@DjDCizq>nVK<~`OJb~xe9_Qs=+|{dYx$2Pdvjy@VM&@G z7xjw!UZ9u+e`0@Gnt*l$&}!Mj6QMWkmOa#^F=B7|dY^;jhiHG5gzhR#ytgG`VjpE0 zI=CcpeY^|2MMIRLEXOV>SVNS=>tM4eOol0sXoJGKFTFI);<>j2!kx1uO;ydZ^0d5f z-V2-^s;Zw=yKYR87VRZuMeMGV>;q+UwnWKZX;5_2YqlG94u{B6yJec7P8zLU0>sWA}JW3L>b?XtU%EST; zRFYzzRJj$X1*q3KwFn1M^LJVeLxa=Ef+cgV>W@FlKP!Lc?sKdn^qHjk!L3C>1h*FEeBqyNf{b>ufBg~=Fb9tEYJ*yfS2n^PylxVYAoDxI}p zPd0y&?O~e7QV>igFaBE@HwERY)K06 z_L&NS2qYEbfO)3Jqy+P16-|-?tfGKLdbf-zMZSJ+ra~ZcGZo`JA|EUZ^{|w9o~#0* z%}@|;00Jfifdoy6azLBh5k6(GC+o;E>=%DxRlHslg&c?=3Q5j0|3@s)1M}FBQV-8A zusrw^B$7-HOeC2k=joaM3+SsppemVtV0jt_0IzSk7>E#ZLC(_)`v=TV13E=4+o%e# z6)FYb0+mXZ^ShztDFwvBg)XHHY}MWy<##)^0QD%=BAll-je<)?VJ1?PYp%HnRcC*+ zWvKh3Eyelq{QAPbKUH7~emTaa0>t*}@=vah#lJ~G(t@autsv>dzRY6fZZec^?O^J9 zOKGVjmIMT0P1Cun z!;%DSwK{XcYAx2SFO0D$1?vH`GXdNr3pPPalPnjQAYS9l&9^AU3)LJ!?tXu9NrvJ) zO>%{!azG$>x8D~X?6~Y+Ekv<2ga!3@w-!%n2KBZmjup=JFo0u4qf#TqDL=Yr8H$W; zS;cNZ#{=#NottA)%-=Wvz`=iJOWY(552{lZEfMN77cC(UO4IP*qK|ji3j+<890AiJ z;&qx8NED{3;B4Sw{k2t?tlZoppz^ylLEEI{-4+h3a*sIqkgqOvThBFncyN3v^0g-~ z0BOpT<#-<=w}I&0f49g#Vsu~+Pe$UUN}F&39m#)4Ht$VB=|;UtTAqKWeM>)Hi9gEC z#EwHqYG)sx*m;8s>2~j4zy5nZ^VdoT3RcPFgIy+c_`AUB&~oWl-L?Ln7%(2^8~c9U z#*+xd1UfY=K2lB!B2LQ&a#~j?mAcle@gc}Qv5MO@<<(_{t9FCzyK&D{y zcF1s8n2JOt-&`;%4Rt)&gJ=4{bV$ZbgRv<_F)Mr-iv1)q6q$|=W6|kosUq|p#jnY8 zZ5(`Pj@kxeR2m`=Z7a(aKxju8#u@fy+L+=yq_UBlqwbH*-BgNi6R80xrnWW$G|E#0|`|ARep z!=9P#r5!a&&lX_e^*Q4T&DtH{SxfJbFaiSv~|yrB;b1?8M+dN!>w@wV0bA-xhr-!hpTXE$XNrr(&6LO zmG9ir@AXL|-`UkgrXq9PFaV2q*Ft_P)+FdkwwYK|(#0$^3&(Z&#AG1GY|=EQ$Z^$( zx$rH2Up#y;dJ6bSU1h_675nCz|5KI$nB*m45ykFDKtIbLi8cwor@~BVdP;bBuKBZ{ z^btQB%v^tXPFM(HTD%hrO(R6MW+K$qU@Aau9c6st9ebVjKp&V5&oT1=?D8>7$rh`- ztJp+vItt9hrlW#~=B9q~<-5ty95oKys?{N-h7#UaUcZ=>{k;-v9;u z2Iy|C1}HEZnYkMv!`3X(02wxE-Uet6W9Dvv4BNCw17zN&nHwN*gEsI>3TnA#nj0pW zw=&<%Egcv&0f3!f!D3`;xhvg_=NtLl|Mi76CWA0;6Ra^av>mgVF-xm4hcz?Rt;S4Y zo3VfRw3Wi`c9Y0E3d|JvjtXApACb={{n+H+=Npsd{-|}3X06k|e>NYVT8rTz)q2{I zx_$in=NY3lYAb+|Rll)5UBpDh21?8XY@mk6>$Uo3_$B&TL-i~yx5rQ=rWU4*XaJAX zh>mh6QEnZ@(-!VLW>xEf;1~WLGeeKRJQ#}h6oNR2H@trw zLbo8JI=~|_s>ONcPhZQ2CKt(_wNlqSKJ+)XcyS=#K!AE0XmHq*dI_Vkz?`s5&!o=T zVfBOzU{r<*z{4_BAughL;ZN?^14Ds1ZJef6Bnwg(zZby{g}CfsKv!(zO5h_js>XR? zkH6MWP1bGa4TZ3_)<~>_;(aqjh2o!e+4^+h5QQUMJCKy zXMBXUu8$XnBpnDh%E=T@M*v5|Ry}`g;`SYXTs_{+5xrSrcE;Mjww?bIVtIkqMVKHrNx>&? zIQc+0Y@IR??>x{a>>9o6Es8fmym`QpnE~ZV%QQ?%lbpC)`aiKX1L%zn=(@o!D|ap2 z`oQDNy0E#o#SsE`tq^}}z_~&&ckhq%rG|%e@sJ$|n(aZ35dB)DX_67%G05pN+Fa#j z%L207XC_mctYb4ZS%2n2wJu*7L+uEL&FLM{&I^8*zo%Ud9gLYB{>AdR^3~Bx^Jloc zSC}pFLE**m=X@bK&N79Ywiae*sSAS3;~Locer)Y%k+c7k_2qwABMy&l?u;rwvZ7IS zYcI(lejht;xGs&HHCkM**ka4pi21Y;&cndNZnAb^4?3CAsJvM5AC`NEFELj!Gg?K2 zZ|p{dWPb%a#LmONfHh^srF6Htm6lv&i4Wp6&Ro$`<~hqJhfA~~AOQ;bHSlaN0zZ-o zUt9?ZayJav%~pT3JulJ_Um!pK<|h#f;!C6F*Aw&a;&df9DG2B(OS9W?ku_3;Qa3K5 z^icI_FSANwX7I0U&??NiP%Bz3-9<(s!}Y!1-R|JUmFyht_^S|-tcr^pyRyki>t}D? 
z{db^gJt_%o9*@eZju2pXu%|Th;w%OQ?F%=|?tEiAdJuoX{EZP?qy2RA^w`*?KR0>a zEOmP#oNZ`NnRd|vk6#uAn{CF#gT`Y_f%Cu~-qg=%Jb)K7OfgN}Cce#;hx%ZkfQ(27 zjmU_6hi?$3VYp4QJA(KBT^0`mQ8{Cslv(Vvp5xhPUFWjTVzb;;jKYJixB5tmEY-%n z-*A{NLsWm?pscmRpesc^R$V#HJ0d_tR1zeW(`B+7a)m_&{z&QMb4FK@lxh_0d;ZE{4Rs0s;M zh3$~R=drW)!Wr8Au5&ry1cQdkj0SP2u5MoV+2>cnSl|X`lC8#!)Hz8+-YUIgM%1k9 z#t^n!CTXrYJL4H+7|$BPxF5gra5(n%ROeJhUBdw^{$R(?nrQ(`(Pko53a z5F>wPnBzB8HNe@RK55(M)0v+gQi*M|Y&s6E9-@RTsitW8&R(`t;f0`#@bmN8p=os` z^T*$TGQ@Fn0PLW>O%IXXXA{1!WSfaVC4Cr&YyOu%J{$1=m`O0U>B zVAluG)x9_#!i4LEWHas;vWFpfsI39^`_LEwE7BwhgLq@Y`K4$x-Y0;r-p`-;oLoxU61Uzy8*{_r_FzW z=VwLr!ti55Std4?VdfhJo-zr<7U+@SxO7vIm~Aw8-Yh74cE%#HGZ+Qaqr%h1fLO6k zGTQSdn94{CGePL6FcpoCl5V@k*4eSY7zxTz<3Ox}9m0&sHWiOqhB4<10mC^IVD zR1{_!&YZPL$r$$2C;{pxmBOGLU5bC}Ry116KhNS$;OV1B57`;0s}GH@?)Qy6zi-LJ|VSSQ)a~ z9;JO4>OhcsXa}O4*Lri_)6LUs!p`HOA9T-jJc&G&wK~&KZ^3*|jE3*Jf}DRryY|G=kPJ| zMwI*b!eq12=wS~FVOuJk%e!*Ro1X6*b^t#gM^|LdFnzGWtz(A8EDn^fkI9Gg`Y{d8 zv-m@OTJUvk7(gJV*fr50osEBUdRylB1~qnb`@s$3kl(JWzOGtvV;X1hk}yOByYwmj zp`R~B5$7Z;JW}su5GP@Y%FV~?Ww87p=L_FyhVzqX8HRPRcQd@^M_I5#&K7pbrrbpC zt}!ine&CU%IEagL5=4mpt?4N)1G0Ebh&Vr|JU&Mpx4{m6$d?16^&x)_ODxSpadc72 z!@^bv&BHK&ICk{+t=g?R8haV04;*Z^MDU1U3Cd-N*zxmC?xpn}J6d@m?*w!}^Omv$ zOtT1)#TeKDG=5o{7DXYf-e+g-(O9Ry( z+wMn@m-C$(xx6n*E35Z~IaETh4rre%4ZODyLvr>+8|8K$`9EK)dpGdj!>Ll}Wz2x) z58}<`TJU?EUYrf8=aiiyRX?L_3aP`7BKS%F@Klq>6)Zo&>~^{m7APUB z^*V^LST4%Dinpz^Ce^x(M{!j-ABymvLZZwfrq{1elY15*77xt!FJa=nm4~t4v@B3_ z=P3iGljnz%u@)8q$CJnwCM**)vKUku{b71o62e2FSMX(uVsAzi#OxyOg^}93pccc} zV(-oM)*D`q-c^$t7b60F(33wGDPa4hS$v$5?cxvtE!fBs7#!k=EqyAel*|&OXoY=# z_d0DBNU$lAV)0Cj_b5t3ES8!s4YNjJxI(m0C7Hi`WwQ-@AIEM^Xl1x~oXXkC_Nwy5 zk?0tK2g+sBsq6IbpY4;#7bbsa#ha%AF>*cz_bup}M(oHkenmac$*p?@7b8k`RZZyQ zUEnS1o)pop@#1ADhk0eFOA(ZPi8an}ylh;`7YzBkI5=VslyH+|0og`Vr>d&oPoJBr zQfTM`(dt~?fBW%#_xjtz%fwmV)A}V8w+&xH?~Av^%vssY0QDy~!<&B~Cz-?qpdP~- z<#e#CcF0DpZ%l|n4jdEX+|q|ihiFWuJBQUis7Bf~?ZgT$X(w+Vz9Z{{V;JZ3mos@9 zd1D|!!3zT!&QJbCJx`pZ8pm1DMsFLT>}{e8>fcW`xK7c0az7}po1qWai>AN!A60!$ zCv8YWH;J)fE#jiPpgey<->kBAK+Gzrr&A>-3Kf+R7twx8z}}-488)wNm9Pmm-z8hN z12Z?Tkd{ZTPRjBEPWgr5(zKn(iIt6t1Po?!nxx5kOH`C(I5kfr&o z3KNse;=AjDjmuR9bLd=&>K$abJa>b!N zSyF7aGV83ADMT~m^1-uQ)6z~U+q$&VI*;uC_p}g(QVsYq{VIx`haWdz@7?>ae|)`{ z?k0nR{CIz~gyQwTAHOb!BD!^tRi-qXCuB}ODD}vlZP3;uj<-fJQPNl^*3|7hl!Q1sGw`jF{6FIK)I^pM|n={*f)l^U9rF5*_X;?Jsp&nPzyXS-hrhb4+R( zPGiXEzhP|>+l);gTgTzJI88jZjxd%QYMRDc;hZ!_M%jgxSIeiY!NTGy6aE^?J1m6u zn28rr3uf{1qBHKDs}Gdn*9-;AK>4jeDO7)c?c_@JUt56Vu><8Z`YG2%X(T~XIoBOP zxd2VzQwO_aJ^Bdy0V0^6)SCY*PCi2HZ*aRP-Y2p~a<#WfplshV)HJgQjb?zOL8tXEznCkiv->vhYCE(H30{bT^;c z&d4*wMS#*gU@xcGBpLq5gM*a(C zhasxG$dR+aAiC3>aIW0xu)vCS$bKj;(+IrgTDeQMNn=&AJx2V60`hu$!R7q*_ODUW zsU3g8vQDk~E0lI>#zh9@{|(3&ln35LwdZ~4ODo?4{);Zx18Xj;JP&{HbN+!1b=i)} zkmMI^%3mTs5tQ-syXvbcFTxCQBl>6Y&qY+<#SFS@hWH|DbH&_4Y|hJY^?uRwF4SEI8~i!+jB zFX3GnJk0_+SZ3>O*v&Pz`n$Y|5o`rbOCHdB=$6E?AAH^X9-XhTaqzL3iB<~PQ8axfr&)leTmVes1@R7rfe$eeW)w4bH(|0up|yl>7^iN8 z@;BRE8KJ$Kvt^P2Ik*|yR>`laO;US_gZR>jd6i~w9)NgtXOgd+(XN7S73aTb7bmx| z)#-!T*z^n-EpC5*hOAaLLIX^zTcQE&Wi&@a?2BlVhS(N1Oan+8Tc-h}%}vzffv_Wm zY;2^4Xf`%eBa{|5RAaPOHdP~_RyS57yqD2jje%c8gEa&86X?;7bKAuOu6}1a|)J90hxiNa-1?}J^Xg1-^qXIap?j%4>0)~8tc0ySaA?X z0J({Y0m)8GP*rJ_?)rWgKj`PHJ|P$yR1Px-_}v=7JxE^L17)>+B*t`=ul5kxY_IR5 zK9#d#hsC*h@Fxa}Bm`tHqgms;0hc}Q%P{S8;p+4hot}i6diIabn`9wCtMCaCy6=%o z5bf8HnIAF6R_+Dsjx6nD#j)?w;YDgEotUn%7?4fU5JiZ;M%oGh6Yr&sa3aMo>m9t_6GZXV$fM&3h`F zdNVyL`?@p(K->E@eP9baH+@7qdpLbc+qyYJKKp=+F>ZR;A15!&B5 z8lkqXdo*OUuY)wgXmJ;5NNZy!X^hgcZqf*%jUA;SrtMv&X7yFwQBq#&1PB&*Ag>Ao z59ohgVc^xB;J}0HQ+D})z?{xyF%bF83?^{nVg{0y54!=u8x#=CrXsYixD3%OEifaX 
zHWrx?Oxp_02%>exhA&uRV=aJ&ua*VU`qk2EKUNo+LY3@MUU*RGay}O}XsMqI8mtbO z3mCXYfWhXw5WsroIwX2q5J~{u9S98zi{gJai64rP1Y3;!D|R>{^9@j)q`b<0GNQwt zxp1>k-qN{!h+}_Wo*!YfBj=BR*_8i};w@haUijq2x5rl-%0Oqu zh31}c9UjV|u`!&b*6C}q^t@;AjMc-@hmh0$ElY~@o~lh22XIiBiurZrasq}zzq>Sh zM)`s`?6pTZu2d}=AOL)db2*B-s#wYwIojd5&#b5i<{GtABSWW-YNR;gii>|>hhN&$ z21<3tIGYO8j~M6MgxG{sHn27$LF~DJqvMN1+ul+wYH&z(tz!chS-a)LuEy))6Pm+{RkZp7Q|X^NW!u>+U2%Blx(vEoaQtb(z^IV2IQaqJ;k+AUe)(dRCTeb<(%xMH%0 zq`afh9-sq?LJM%{Ls|*ONzC)JkJqRO$&x@t>LbG5jASTUsHys(MM-~S@WqqsbgQ@k z-Oin$(C*^3Se!ML1JH*!LUs(PDj}2Hzn2?%NHDb+X!WE z*R@69I&SFLBnU&+k%>*JNyts$5hJp{?$abA2H}5u{9#b~UCiDjU_YRB*gY>P|B923 z5c?b44$gg&=LRYGo=JZ*k{7V%;osk}nauVO?6LotVK2zdmYDBXF9xOm`2PLh#soi! zzW?W^n!a04|$dd;QAZ)CckAsX(#G+U4=^%2r<*kzr?I@vDvWB$J|L4I|XjoUns(b*F7OHT|K4`1nYL%-hF zc;{U%LpKpi-$Q>8&T08Y+b2NoDBqTJ{WEDq4@@LxoB*=&P?AQ#2C|$R@kl>@Co$Fe zsz>a-6Rof`!U+Mm#)M`t=^$Dm!dPXSIOOX7H+K{#t5NZnflQG8REPFob!6i3bb~iKFg{HSw977Q|5xb7|;i{pYQ)m5-Yr$ zW0K1j8J15*zGSde&I*?;c#esZ+O_}y20A843v!DQDasYM5-~UlBu&!o#^qDrvKgXz zWUH!fut)PMLt2)lJTlJ`*9_}Ndx-jV%hF_C67Se|yyuf0C@52ZT>to<9#pcLmpAKh+}&P1fBIi4hU?i}Xb-`>P5T5QAlgua^2{V)~*C_W%ZmW~ysWSD1Q<2$=4zammyeZD8e5}_U5+Vf${aivoUON?5xLGpA2!vax7+BBrxRijzk)}&y1+1N~7Wt{> z!kicO__chh5zzCjCm`2BNTwfq$`zMML`0z zN2z%C(v-m&Rex8n9G2Anbia)D@9 zUDH>opQ!>21OYNRUjlK$N7a9NZPQN78Ixg^DS6}cvJKY6DlA@;;5_&fUuQ{bw+ny7 z@%}P`-CA{4f!fYzrz6WFW3bQHoLhw^O3lxJY_IYZdw+D;Rvrdx4Pnz%UP=jXhotfbg-TV*u(Z9Ra-Gu2X~ivXeHO% zH_Jp4w||HO)`Z(Y#otX@=D~?08z%kF}1+1{!f=haD&p>ls^tLZc(b zKbGW~hQ|_`*W^o5#C{NOz9jnvSycCxLvx5NEfH3QsYbFTRvhR%{e(iBT=d$L(kmhi zBl3Oh+tt}>O`-phk}M{F&nWhj$etl_i^m$8-;yl&Gl>ZbZ^Iy697j}zKJ!tx(`pl0 znboaivni*yv8;jm17{@gme20u{Vl)E=`q8EUH4*zlLVI~*{FoE1BK>gd+H>PV^c+B z(KMnx{pn^QjK&fe5R#Yo0{LIXnRE+cyxvwv3R*NH|9Z>4^T(%Rimkj=%pNdnfS#|u zzuf+Ket20DqB$(VqA9A@iZZ)8)_pKbIzasucs5j$y#;Jb7q%fyZV%4F(;p3uC6f)W z$yJu2!=eP=Z#aV$nb(ReYe7bZJF*y>Y=eor4r2Oqg7=dLEjtM1{_F44s)8evN-Z3J z?_y!?Gv466VPCdYIdMWe`I~^aDa-P-5X@|=BlOl7^8@6EXqANXC}#hYUmvWh&Mim~ z%GUurnMy^#6FTf~)PZhQ0m+&;P=8V{-!jiDhN^L!ZfrNLti?PvNs=&E(8|-PYvf1X zf8C*w_0rR++ZIj&$Y@?vYx{(Isp1NMNPr3EdDed07f@ccp0p)LYAbrc&d=|+HvYm|w(-K;+^9*^9SFW1qrj@NKx~c20vNj)w zkkYrYE+7f-1d_HIH)uI`5CN0(>Fd?uvzC%HVh{v?}!@7AA` z^IBKgIW4Nel+ywlG&$9AwOGuhqskA7lPBf1sfI;Y z4e1bHy1mxa%m$K+D9-EGk~3&!yXqe5jAS^ftTPr}J&FTB4ePr7G=q|FIL7n`uSxo^ zd9^ujGv$xA&bWx^53`K+5f{>bK5NmqPo|znpsmK4-7qIFqM6h^Tz#FvjZ_Pt!$27p zlLPZvH-96=l+N8gJ$!wAzPo;zBa}i@`8A)*zL#0D>B~ad^ks=``m#Va<@ZZ9ImdK7 zTk7V{uyb|W#1^k__rHIgP1)V+d?D;fs{<*pBtu@YUZ@Fn`rp{G=sck;zb`{R- z+vj=OFs8lHakBKo0uWne^qd$4!uXOAyhocdm(oo4!biFPVy9ZX-Zc~svKMQjhuR8!b* zr<=fj?W|CaU3n!Y=D*TBA^V?Dd`jaR+taCFVk@1bjjfap-%%=UGmUB3N(oP-MeM%`!kh^Q- zkwi2BlkIFLlr*%Rlx?94gNRz2-!>Fe4L$~ON_!H=dxf|*$t10967p#d>7b4)KBbY= zok0aV`-vs3?I&;bOqFW)Q#gto*X(B2Gzns21I_*YgAEuqFg#to+JNzbcG%}2%h7ms z9(8m2!?N=#(bj=~k>H#J5uz3B(je_2f>4#Knb{CFmIB$KFkp9oyD0YEAYS7P z#~!Ai(t@~2LCRZ#7DW@KIhMJZK@)||t*P`=n{={W)F!2WbNBdFzDIsU^;DhZe4<)+ z7)15$QZ}1ao2>rB_KNKA#Eew`esTt?Q%Ej69@TVC#ch(1G^1TWgF1LXUdkTMt5GO_lK&z*&2>z_ICwden&_GWGm(PNB*A#3B}(5rBV7Ex%#1+z>kO&_qxc8H zb>%-Go-6-hj(eeoX@8OzDRTlyD>8fg-`GP7QPi5s%%mce6$ZKr~i&nf6g6rCR0`k`-z$+=ZgOCwTSV5OUu6VHQrIp6I8lUu*bgiQtpn*z6>?v z`%<_w9@w+T>XD{vPH~(&>ISRW%ggM#;4ROBiyXi~~Mph@d|tv_lQKhAOnVv9cBU2zbUjNN52d85mO z&Tm&gpWHt_YnVHNa7K5zRa(YQnnd14nur~_pC6BUeBjt=1~6`#j5>h^^SB3{XxUbG3_d9A25?|G%sUIn0RluC=6G~ zdwb=7dubpg^58i}6*ybD?voFkeF;cz4-#L|_cO?`ZNB=Ez8+F+Cz*{@FHw?^?Gu}D zvIj~y=||>UL}QP4e&S%WB|qZuU`yDl;ktSw!zAc7F-%4otUHqdH5maTlN&WXf7h?_ zl*`_bYZ4Iqe?SguJ}DEcI-^YPTr-=W%X4s)oG=0+MZlYH?BC!uDnfD-cw`X_-bS*? 
z^zvS`WdL5wJ-oAPUll>?kCuO{2g#cu${u#ASCP$!%-~24UN)k&jZg zVt>Zq&~929(v%Jr>-vdTU9vf`>a3Fg z(n~(M@;s*B`6~Y9wdk~RJB399!ePhzeVTsG-yArf?ZWxri{jLZmkm-?Tt7`Ny9OaX z+W_(RB_THa9uL~cEE*3{2f9996&p9C)qKctSUqnHrCOaexhk&`NL1cce-a0Un{EfKZl75S z@ukQ%c0%fOK9A8s^-qtHw)QDQm!@%aSZ0~Yb_YYRx48LYG=#RcAtGs2q6TB#d%^y@bisyrZ|J7f5cM0DrdJ?2Qm58 zZQM0+>c$M6PoHDWpm7=6G2>F4r<)2$Jz@hM6_9?sYDZ~>OB*dE+m9f}E=XzXivx&Z zS0D>#`2?aUp5oO69fo`8Y0bz(vGaf21~}8q1p|57*&t1lP&;!9AwVq7)75VC$UbUj z!6B6z&I8(8gwxOce`vR1f6>nLhVUY^w*oK6Daz0)1gehv;$=;xy+>XafmRpj^(6RJ^rR9M(^w)WCb;gFx}d#ZE%L zyeJIa>Lxnumq&}_>V?8AtuNN;x$ObA>~SdI$aDIe^t*xxHu-0k7YVW(~|~G zSH6TPqgVsW25~B~CTly#*~Us$lOo2t;V#AK$GH^L0n>BU2$~{ORq2U>PP+rd0y@=} z7O0IKZN6Fy2Gp7_$a)p7Q8hu;X`0?O8YJUzFzqHNX8A?8_Nv5a=l@EO6Jy#TLTMW6 zSM_6Xid5AHf5|2Cvy+MKTX_&~#K;IDtLeka?6E&aA-CoJBuRN=nAd*4r%l$AX@=DK z#=fh+n3UsI2hQ*Z?PGy@wh{^I*h;9gT7UYd30NlA95Mf26+2`2Z6!0=;(WRN^v~oMb$J)qL6h z?lQ>iB>7xDRaDwe4p)JUIi$pA5EGwu5k$~ow7=~^8MH3OTbnq`X)2xbPpIX zJw%73Aaev(XqqRWLRS^Yd;V?C5DslMVI)RxhYW|sEYYi!%@n{&T^&RA;F&&{HH>3M z8?ZJ;fBcLHspImr4C^SIC!!sdRaC#D_%(U1%{Cm)qgER*Hw}@8w@K*ZjOpa(vCR?L zF}?EycuaE@=f~!6ugP`g3&Y_)=L$d{_yODH$Y0U%;|cWNHl4I^M4j0fk0^9*#Ix(J z$%bNj#$rfQizqtG(c$eVG913PD(5a{cc9*|e`hoHZjYJ^XeZvMv~Kr(7#8`-03om=)7ac18w zef}&S&k^cLu7LCbZLHlDVoKvAGujjGsWKeft}^GAe*eP0ey88QCqAB~;GQrU+E&TK ze~Z=4kv`{G-zM>Pg@y3Cd@@_`#;k=jv&eDPLc4~0KYd+>1K88%+|mbS=jST+XX*l- zvKiV~FA0k%cKKF>s0fXRvZrr`aP~CvkY4j=?3m<-BAhLxCv1l|I^K!5_fdJEsNtx# z)y)#rw!$)|@$M&m@<1QV7}8^gLtE@)eEL!18xLql+YIsS=;PtMsh@oLK3h1C znr+tJ)gh%ewFfO)OKhyQlPzQ8eG``s3+bmKCVyRHo*gBK~sZS8`NOt1~uq6sCSDrsKI>ET(m)*)6T^kf7CgR zU9dr&-ONQB)H$tPzCoSY+9exQv!-qgFEOf>%+P!?HN4gQHq%m~QRASk5@&lDe`V5D zHAA!-Wjtp0>??a^GF#xrtu<)o+->192F~9K&Ti)tEy6rTrpOmFqil}Uczx5+Hba%u z(Z_3@N942VXK|FuY&FkOyNwyVe@_4YdFGhbD(4Jrt+XB8+sD6uUMj+)UVt{g>WPyK z#X0LW8jtco;|xI_=;Seet-j5?Tz=E$@vo8N*tU`RLWEc-+{xx#BizB%pOcob$!zSr8L9F7HA$x96{?f6Q#;2I`zU z{^XH8cnIPLwEtdOY$$>!4Fui+Fs+ zp3b$?YJw90t*43W8r)=P+j`8#v#rT_%&OK98QzX@%s@cPGR*S^?K%bgE?rjmjkbuR z3e84uRHgIGpT3q4XImb8f7X6b!}%1@JWIU?%1j5fr;Y}cJ*gLb&lAiOHUk=)r(r-` zI?BoTYlzhBR`Q`Xv!NX7b6(h^7yjgqJ(weyr_F}6mt;Ze;`buhp%9lnE~Az1xJJWK z9o6Z)u*YBPr?b_&=Uo79dl47xwc$+BZEKq&vTc1Ls=xCmGe`88f7y`segv$3pJmdg zugh>Gd)k~A{$Q5qoiG%_LjHAy49&pS(hbF@mMfKhV*>|n>un>lul z{A$MYOtQ1i_&|A$-mvP$lM`(iXqwY1=axUYr4RUVvOiLj1^>Lgz=ra7!=@w0m6#72 zS5wC85B8bAY+2J0e~0l{4J-sRt=<7dJKRO=vjcgXgX7Ev?xb3ip**R$-h}mB{joW- zZ;oJ3_KFF2%o_BWLp>7Uh2m;Fnh_#V5iU@iPbfAW*RWi;!O~59&j|n`0%97P{yW7PTVd1pV<3l;_88}RmT(>4CpbH&Le&H8~u-; zs$<~t0(r|pO|FW*&vKEMW3w^sYcn0tzCIpJ!>jYASqo)m?x7I7X_lmDgG?Rp$xRw) zG8nvpF6V_ke|gN3$MV4pZRAOV0W5v@-%SLIiZv4%D!5vDM?bnLBJ^o&&V;_EIjwTO zvDYq$VE#IREyaJjd3tOt2F`Ei4Fx*(3IG8e@WWe#7~LPK|(e`I$A@Bh0jhNdHV#%^_k*=N9D zvCp6ZVV}io)TxB{cimw10-JX}J}3?UPWhGBcC z?KByyp_$P3rAZP7@y2+-Nf617QSPEof^+L-vI238?Fc+-07N?rD zc)EG|R*eV^1n8J$5GHlL!mSwZTQO8)C`3a=&Qtk}hX`s7xtxK9En}6%U&gR_jtm&j zll>MN^Pe&n-b$n6{&7{aL~og~|9OLrSh|}hdb_zIH|re0X*1#NTPGPE3=vEPro{|_ zf9vR*C4wD|-4>j^IAVV>AIPI-!`o|c@HwVvmKe@I06k|ayp1LYp`)5+iQEzc(X-a- zo5|8eiHZJYDcMMHO4;2?5bo{hSwmDTAk}&Lrm-1AAF3A)lj&2gh3tjREau)(br&0?5oBtFNK8H&^~b!|PY$s-lvK+Uae`+UYhLPXQM%c*LwE5OM^Y|H_wzBq5qqL8cZ+log zhw_y2Lkyu%VKgCpz_UK(SJqq$P-pNFL8|ImT0PAUo94NpAMO+6E6nVUbI1V@Z~KBG zc9+Eh>TilDIxq+1m@KSb;l;i_z#U6=i)joNasM6bY^*}OJi&5V@P#&Lf3z_HZ8DP0 z3sATm10CR@B4lSp@M#K_mOu-l?sTYBgFLZ%J;;+g4;6ue=LY~%nqTY3-@;^t!s{en z2b(2O+z)oC5YBR~NYqj^LsHA(jg)qLMzPd*{za>d+$NwV3)$wXhU|Ka=`cNEw<}r6 zcYpt2Czzd7FKn>L(QM-L>P*I`FMrB6&T35=G#k!I5Y;m)R-vu4B>H|X2O)s z?%Tyh)^?&*bM_WSlcQOO^~M@>`>;YfDA%2pGeg#%%(l*Lc^E|YQIYK6vVgMt$9R6T z2W^MKz-KQcQOqu3TH_4I9`3wYvk+F)CDpa03&AO`j?i8Z(VfPovIz 
zLaf*&Q=YzUSki(WV!D|A*V`9b1ANCMzeOJHZygpRp}$!LF?P2>P7<15BlVoB594E; zx*JRuWe9RM+2&@nrb(9dMiHeh4pGY9&(MxJUJ{0NCQ%r=>-6uR-Rh0Nk}&0ae=G8B zK=7WnMLD4!!v2yZ8QxHb;D3z9YLhGplG9E^h>1@3MjwJV#G)&sv7iBrhPK~S?I#z% zyXEK4R`6_yZ9Ld6ppe+cGp~b^%$vMbh5dGVXOaEdVRcm&kZqJpDQS`^>vP!Jez*Rv zs=g})il*M!H1&1cbgYRy>-5+1N;r`b*|PiT=i&EpYH4Nu6VlYZ6@Q`wZRkFN>6=~p zM2!Rp3O*SEfWpLp@zhY$>Iexec^+y=unuTvHVwSD5F%vJz#)-^1)}Uw|{Hd>FPP?*s#{y2BDwf*gmo{`&-Awmi_w{`zk($y3v!7AEczrpG3p(<}#L;d2I!VVj%57N}=s783wrc00ldeV?e>2vr2pb9N`MZ(f zcu9rJUX}s%8Flro;&$@0?F_EY0)rM{ zEeoVaQA^WYYH$uvhm3u)iA-`${s|!I<@x8S952Ic&XzxP^AvkxV&1EFeQUc>Zt2-H z+ zs_XOIO2E)jHWBjnmlytp536;@)|0rkf!S?rRUwCpJ|L@Rz1~65uGz=PoGVIm6{cGR z5oVbBu(d-h3|1MEIE8&qy6W(84_gTL@+4jy=)LAKN%+cs)v9kSL@b@Bv#QP{-l_;f z-}S^cN}q4Se+ug}(Eut!U|f$T%IW{GrFiR&OA_~8w|i)mk)0LdGE%nf*=kh2Sy6P) zlGM!!BDtNQI8KVq){StKP|PI5HfPdxl6|1euU@p<2??<{)HGj;G8_|o`Km)o+0Nr6 z&0RX+DY87+;qYK5bbU(nht_Jn*$`c}2sh`a7urg@8LQ5-pv{txcY$XqshAh?vH17H z-F>`B5pl-O*cC^g zehCxrtz46HgOg}UEPwm7usaj5c?BP*WNR^A;1L&DkIB}sv&(!6Dl)Sjwm+35)^4mf z`Q;$P%zhb!7FT=O>(o486J5TD>+cQ2(tA)vHbAUIJMkqhtK|=iT4iwyvcjxNo6KFuYY<~(+o0NcfiOGR+6O-h8 zdwhKY@K2vg2L~{0P*^rHJzxE_)LUyRL!2m*!&FoTz%-AEF+msPZivy^E~qfsbh?kF z_t@J$(#(h0#Z1^DGM9aH3CdW@xni?4_GHHGJ4jPN#lW-#RFJa@NQ%5SD6Uc*IG^)K z{|8dr9iddjYkwvxAiO7}!8uRJbAEez5~o#2T`(sPt4H}Hu9&iwQGNgR5K3ng_ZS(A zu+|9|=MXj7L}Y(Mu^(c)2$PfqQPq!v2yQJ9F2c3Q`cwHHo3_VUvJWCEFT@+<9bj6z zqh=`bzc=TjL3p?n2j|~XU{wJDWzZ2!7`_Kgki;!3i+^)EXdmPno7CqA-egIUx_P-5 z=3=m8+O4@PN3%{?5N|GunXf3alej%608-Od0s-sVN+jzR0M20?#nt8nINV_#&=e7G z>(Xa@6MQ7s1)YhqBnfCUekWRsG@jzx~*KOU#3Q& zHE4f({D1u?fBb_!1skluGTwc^wx02!2rVVkJUQPS*z4DFbj(VO&MrxFh`-0IFNFPm z_IpGb68HC5g==jo$w&-QUsEwKbu|^_JimU2?CSK`6ri-1D7SF{Rw$3MIX{0q@uimc z^{a19$LTBt9!Jz$Lv5B#Zw0jpHiAK3e^4e}) z(x!a4w1=7<)ERn$0GXO?FK%YN|%mJ_l&;jIMYtFx^nV=vCF1nXTA zXDMWPW0Re9yu$VmF^Z}^mlkujw}#q@nGs8KVf$nuO9`zHF2U9{Zmsj%6#Fct1g*|W zDZ^1bPVOv_JPDr!55w{*28Ek$3^B3#!A}adZ%kx#3jh^1>U5MVZOs`WU^|rMw|}^t zL6K&tHoq@Uaw8gL{t|X^nA^u+7iVC5dl!fJ?P+<2@~YhBIav3*eb@G~I6qQ$Op?kr zIbe|I*YArnwC+7vo~1c#Z*i8_ugjNTelZ0*FmG{^s+&|D2uw%Q;xD;QZv=+R$7@uC z#I0LowR^P;lCjAy+n7c=mlna5^M4Gsaj`i;dzo|x2e4dxn$k9Bu^CF+nEI2Gm%(RO zW{dXLxs8qBZDY#q%l+kSo^4oc3eOg%kL2AYMI6MNWs%tWb=_B=jSQW6phk+y{(%WV zlS$e)KB{@7lAz;{N(Rd{JDh1%%ws(AAnPDTA)Eg8x;G~-OBG|@An`yxO%oU7*ZGapxrigoG`|2&!s z6MJygb=IOl*N?y3#q;~kl~q8$S>~XBTgJm5`22o}ek~wT+ZjP?uGdnT$E+#;D3GuWb4L7v_kIcv+I5Or;=C3(SVr#{@FgJfBwThre7^X z`iF(t=^qwmr+-+Oo&I58cKX%A?DVUJ+38mcv(v8&W~aL8=WL8%RuV6Xb3%t~Dr2xZ ztx|(ur*_z(6t{dozI-2Xl;ON`9Pffma`%8xSf*4mxlHSb9hq)^h=08;{VqS`K8iA( zfxpPr>}_fT1S#ITJDlb6lR`iQ{P}VSLGEsu56}(;Av>mG5rn`Jgdk$ufUv&^0joXH z1~2S~)`bB10{&1rh(xGUJ9ng`)hP$(N*mUXQb?#zKKM(3?ON<_TYy0OPDB6*jT2P> zv^VE`u-irQWdOz&Tz|8B0GFZRhq4HFu$#@fca|X$S#zoK*$@Qfk=={vU#wU}MMwe) zK!3e>2}!fWu4$Fi8r}Wv&pHJ_omkFF}6oy|=c?Z~pS(*_==QNbhsGlBa6}p*x zj-<#T=4LE!s570J=ImesJY>Dh@eD zgaNZD?jMv!)qi9Bc(<5NdYj+8cz55gpIVOv=|iv&Vk>dmtPhxM6W>bAXA)Z+^H;n` zfsv_o$UjR)Wt`W0Z)^OH&T*L z5s75IkYU!(OjiF9BXFtauGBm#0)(S_tZ?W0R5m>*nSY%t=7}&JH^{vJHqXTSqSio( z&8OudD7FAGhJNX@fh)o!3%uMGoYstxHAr-jwg8dTS(4?uJh}5D+j^Y$kl!lv`Q3k4 z(b-C_t>5vs*n5+l5S&6IiIK$d-Yp-n6S7O!%!xzRpBaTpM9QRU(I^wsf0^Oy^zWbD zibLM~5q}bCgdirr&X2$*+SCJ3r&f$Xo!;r4qbGN~>jWq{8dfRBv~ao+O4D*maXFG| z1jOFN@)Db*`Eo?v!{!zalvuR{P=Fy>bW5@{%UkIB!478|9D7)*Aa_zq7UE7?=U?~k zO^0FXDDx=7DcxduO0n6{!nsD0M)wm-+TBm?{C_B~HTNH?_I#@Cr%0p_%G0Q;A7Wpr zZvlqGFZW_^{8|s)BFzcyFGjOLZ8nql?^LQ-66x%~5;^y^y6M61Yn!0P$E#u^?Lh@a zILnf34n=D=DKdv5skJg1dgwoMsQExyJeMB%E|8omwqhiSz7u zPhzvm<(3|+EjsS5)3`<{DKh+nUxW6V+?Ln8Pc0dfS7`LKro~-ZB^%asgQZV&(9@TY zn{tt?VavYV|M+rs@4mcVy?&iR-97sAaDNw7Lq5aG9)8@+VbhQFYdra!2`x?hFwdNn 
zbJ2#Cp)f#fFGnWb;Z{`Wq0lQrzS=9Fsho*f)Hi%^@t!(%CZ$QV3S!)S={t*BzK|+e z{Rev_pHFE)BbrmydD$kJ*Xm=Ox*JSp)Rxbut-c09^k!yK%l6ao8plP1WciF**?;?G zHY*k;p7c0RM=uUh$|gcc5aAis5~-E$K>C<}y?i&1T9$)Dz6)rkY*d~%+y zTn?_zc;=JKEFaIOmTkzV1CM0w=85-)=W;buouqC!TWiV`=CgIo`9^1c`Rd%wccVFX z6(*u|w2E^wxxLw8KE0xO%h^FwsDE6HXI?R%Cg3d&AFQWuGqpIh%UiB9aW^QPM`xM^ z@yw-FsGPBhD<@#)QO5QM(H6$187IfspHV5!i%9GpoKLB!jaG@w)xX+GKa%W?4ko;r zvQNHEK4`g&XvH^Ut((*OHpE1Fo={aC$v*R=eCAQd`qQ+^iGrExobpJLPk%0cuZ}1A zJhSEvSxz%vN_9w<(>#~aBYl*~Q|`UAl2h-!)Xq=w{iXq#IzM=VLjgc-j#8STbkzCb|yuO+Yz>d6o&zY#vh{(Z zT1NDwOlGgsf5{-{QIe3Y*vG0xJrpz7V?6m23cu&k2__x)SFwe@RYToUYxi)z2Qvi8v`*wb{_o zq~(#LvUS}9?F4O8H4NI|7-xa^OPOahCZroMdOKt|ER1@hs|3s$l~_Q3#3*L@O{_{Z>5~yfP=4u%2J}rDA`fqq z(8v7je=2@*q)8VY*9YaPF%19?^8>am&u8*ThCJ0*!OCiM-7_K>!Yf4-9Za@IBMP)V z(H05d1H4aZsq}*=H`5h6uSeG-W0C(Aqto+?=9)WV@WRQCGC&C0A!GUlKonn3(BkPYCT@?8&=ZgdoU zRE@4!=dlC=bK} z_#;Yo*v;PyjQ_+gTC-QarrtxGRHf^ie(J+T1(;OhZh+EvN;&@VOfZugiLU5^aqLb!xv zLF(f7BG{o2mo>MM@^ea)wrfUZgSn@O3pQC`*fm2fFm7qfg>VTS2+<;Gv{OE=e?!|H zJ*hg*;9pk=c)Zq=s%&sAN5e!wyL61vvVi>h%R1wot~GkY>dT5%G^hrT$#e!Xf!si= z#{!%3Kub7;@mw6F-QbH_%>z^>n%u+1y6&rb7f7E{7|X@WXEPn}c%>8)9O zWXLn1{t$aIXUsom73%xxgbG-nf6e46WB!5i3bgs!cIp}wVS?C6phkMir!;5 zR842_*Gt^F2UD5is0tlde~bu~_k`*R4oAgES~T%AhgzKusuZ6LRJ{#bzuz+^YO+ha zi8U;EN931gB3^f4?*`8`x4^+cd3dP6$Zib+Cc9ZdANZBzpp(tl#1= z)trBsDjvjoArQ)XzVJIYW_?>ci1k7su=UmKp7?Is8DrW2edBcHe*?R!`(1knorXq8 zmko88b({852_2ON9VffULcxr9ZokFmy%t0H?1QZ>H&}yp-x|yQ=`i`bs3A4|XQ)-@)3xgR+}#-85)^TN9>ns=r;|^@(>SDAUOtHZ(+_e!1qf`fu6df))T0vvrVVB5 zgt0A6$LP*i_^$sk@yezT1p-7`6z zXN}OhIVuZ2J1CAO$k@$%x><0N?CEB9r8zxmI!23v+dS3oYfo38Z2p)?9UnlNt7Vy< z5qwosm~1w>e+}Xk(aOQ=3e&MT!}P%hw-%7A-?*x%MA-|+#LDBL(-ejKU#9_zF6 zWIWxMpleW+_`y1$1w9SCw-AFHI4WPqV5cWh*8y6Kf8VAH)-qXU(c38`@y9SxyW&ww zw3fY%FkOp5Dw$JKwd_=>l4$nFq1N(%)52s7>Li!6qg>4F`k2t+LZWTTxI7ta`Vu4! z$`8>h2?LLf06gvLWK5cjG2Qf%rYLveMrFugpXwUl4?IYCf_jBx zZyTZPO@^_T=onG`vlIHud0aLpqOeN+Sn6X$;`dJL_Y=`b5lc=}97{a~P5gcWno97- z)1Jec9!hzX2>|ES3ZJqu&P3k9JL;-B%>Ufn1RZ}fDY+tI`K)5_qa$2M>j&8T8Oe`VD_k6 zq0C-Qs-YN#hd;G5(^<)CG3v`wHn$v-f5V1tAXLrBmKZtvO7G;q$gaYeCQHJaDz$TF zZN(|YK>Vuciqz2-L6O&@R2)weD_TH@(GiphjVu)#SZY-x_HZ`rbxhdHTEPE-DZ&F$ zG8Tx$Dh0SvE4SKVnk+RNPOf)2r8rEyH#b808We<6YL{xNaW ztr!T9v1W}4?J4M)(2+5jyG$hm8|`BZ=J;`8lkxk>W$a+P3^Oo8j!2tq$soOqDd|D1 z0|m0>n7GNt3f!#fMcuZGm5FvIuEkF?*moyE(pLROFGVNrz$t*cJgt**Rjx&9So{~4st>Y5T z8MF>U!rBI2(TFe7MjVxK61+_1v{Frl)#Q{$l%rF{n#^+!L>Azzf8uir>kaFzilKfc zjWa>oNiQ|&ond+zQ#80GASH;{6?kcuc=WG2bOSL{-PuYgK~0i390z|&FL*sKXDW9) zr3^Sk7FNu;Q!=as)hXR4G?XU~tU@exnA#xOF{{N79rnX%6w;UGjHjKzbMAVaQZR45 zLbSr68=+KzY)rrzf4pvziHsyk?e}0||v-`yn!EXLu$nE1eA>a&OH_3U% z^(O_cD$yC1^{4at<&{s+d|s`x_URB4Sc1JvKYKwg*>a2|Jj&Ak=_wYrVpGhei;prm zqiK&4v7b22ug^kBlblz7R_7beP_F8ULE_V{X#M;wmm<+*f0l_9eDYY8rJwi=QS`bgV470;-qjnEv`_lk}%3%W?LLLZLe? 
z<1C#cQaR@ff8an7FCl>7N99d6q9sAspz{SOZ{z616-jeO`n_i?l6b zGO0);L)xly0WA7#-M-#cV4un!EgbFD0gS%YTA0P{FTRwO1~}{raK1O{^bdfdbun6i44g|!ec1) z0$KLcsVr_3?Ab}PDpP|K1WjV?C!NABy+?8^oJ&?OCq!*HfTWU?xFKQ6A-Lfr|2}5sGp=5kxgI?W9w1 zCdMU0v7barMD3(!iNuem5NTx@xx6EliModXD$$LJILp{QNN_r-I-msCbe!_cMukP# z69i3kVmGNgdn$Q|^nY0~lHBR(W229#BU#~*9DutmNRnJ6ozAXsJliP=6KT&M#YQmA zIyA{I^v^Cua`B#LCK=GvGLE5EshnP_ld#qJqdD0)mm@tY;5cTLq&XBhmu3{lKC@HR z)b4qcl`3V8Q^|3r-Sl=2Cdn|QZM8U6qo@FNYv2sgut6PP{(s&*34w|MIeYWo5d=ar zrYC=3`&4O?*0j>=nZg%@02|bjscx27s-kCH>nE@l_62L0+5BXht!jo$q@0GJlV{}e zhD06^g82nBd{D6a>}w&8NSX>PuEF7>44PH}->1(p1bs`8EId!D#GaBZzX1p?2;(y4 zE$T{9Fb=>L6%2F*QgIVg5C zA1>;aS0}BrB%(jbwc0^!CY;B%Inmsn=d-x2 zrw!a;wR~t-nRX7iJK!CV^GWtBhzV_yeLRCpMm2)wnSl;yw?)a0A0>B|z!BItKyja8qn}V9tgF}@Dyw_F&?m0TrrIo^bBdBqOuwLt_0P`9D%1&Q5=cS$s z7{Am)p*&&Ad@!5g3?;w*vd(x1ca7eEu-1lYJ#`>ifhlkX3QTy5-KBwNaa2naxR1DO zSohR#>1dy#h<%#KZB~FAcGv{nfG#|#&6rc?cjX)oH0QPmk*Ell%8h5W1H1EtK7+1Z z!lCoH(TMYmdI{DYpms-?Alhx6p*Ma?9b8r!C;;^qyK&In(*N=0Y3BA1%4mgu^W8B$ z@Gh*1u+Ku}<#-1FjjIH9USA2Q=lHQp?BM1UZIIEu0H@T!d2FBn%3JULyD`_)vO)Yn zrNJ7_-!IXRZi)zf8XNY*xH>4;I70=H-imOx!LDDVVc_x2D5mqxafM(W8&d)4Gd?fP z19F%piyhSm=CKhCnhuk_6IM=trjFrf?vOnV0%noA8OrSSZg=hnI8(;tss*Sk3Cd7b zg!tfzI4PK^9tw=rLxGukNX&4}yqik#kbW}h z7Emuh8eL8Hxu!iZR0NhO+M>aH)AneI6r*Ji?Fz`e(=aO3^Q z6Y5Mj3$_o*(nX1}Ca@!a9>N=?KxOOsPC;5iqcVEz6b+g>%P{b1Tay&2d88-W>4^sO zjc|DL6t+7#T^o#-#^i&!%9vN+8~|u5e@Pc6n+<5aIz-=eDWZ;|v&8MY(YlFcELf+%mfJss>sX5@xwuhAN0J+q$5XI!X`Cm>DMvQ5Vb zn!}3_t94YAWGe@-Cr!%ey~0M$(?X`3u|L1*@~En44})BP$&sd}AC$oLtGqE$|9@@Q zw%fQ3MBnojeB1)NPB+^EeJBcKop>7{_c%@VNsukt(MFbRUE=ts`VsxI@+y;*c(oP? z5KEjnb4I+)P!i=&-+Q=Q*C+fnT<>pjJ7GEdu-CSJh1+0Q8TNu+=?;Ytaa98t=>0%m zhI$jox{BX_V03VNrw0tFmlh(>r;u#Y6)wb|fE<&4gMEOO)dh0)KvWIpK1};EK?eB~ z*2ZKSKc$KWs;^-xZ-|bh2Mlv8q%(=%qd&-(4v@PKf@(1LfjW}`GRU96&Lq+JDOEI3 zeGOB2gLEb>V3=b8or!%HrpU7$`4n(jH2Goe(SYcG?3I8qYB7S{+HQt(XuCPYmF*_f ziRHQ;&Wk-*kQ-P%)Q^|V=6IIY>v;Y;m^&TJAg>V0h@l+fT)=0oeISEfvw!H?PUFC& zRu6c9SAOZON?}NM4)Wv>C&D%X@k9--BBw1qY-Bd{PT!yoN|?d!F_eRxW6xv^?4FDo z?jXm133YKOr-I!GB~F)(tHB(Q@Pg`tA%r?q*7;!0v{*roAni~mhRK=?h(QhogCceyl^&9W96lW5<+6 zGIbbHPlkTf@a!>2Ggg5-F6my?k9*Lzl{Od;-Y%O<5_9yHFqVqq`5b1jjO&oR!h~by zO`nBRH64l1(mKK`6fl^wir_+auj5EF?5S!!fD1{dXQ>4fFn>$SkP)Fr-Gv`haAX&M z$jX(CBa7g8&gU6k;TkzaYwV*gUX&yqEe|GTHcnrvz>g#yxYw2ol!0oi?782TX!bDD zzB~h{iLONfk%kANy9=Wp41-&IJj*gTui3{+U(%{BmSybqw7CP2&Ivw1_NqE?_y0nD zpQeqS^~>=Lw1jaDqhj_WT9m~qpk3I1aV%7OHLb+`v$?LGnV2fcp5v9qXOYZvsJP(_2y+~VYT_1MM?UGw>Qn_ z(4Rq%y7{U|3RZR`-Fa!q3#5xUgkV2{nWL)XK-2ey9{0C)9AC|$fcRbx8mIk#nMjIe zDooVMWxexWgX&Q0%z^0O+Mvaes81dpAfZmGI|E5w-?}l5uFF$@OcUL%lq-|G&h-KV=Bx%HEg=iJ84QL%%3j7Z}0nmvx9)@PH-a?quiJ@m&s0;87 zBY}rvQE%fC;$ArKT)Qk=MR<%^!^*%(kfLn<%nMMbbq4=Z0mbzQ1ERD})Ts;=R9yFn zvh)>3XmU27_sX-jEfjsiMR=-{?2T{E&ZC=2G`_h$dmK-7W)v}hiasv#x=d&IAyM@E z4d%E>jteyX77JXMH1k?9c0Ikn`1&}#xc;WMw3kuL;EPOVXq>}V67u9(hc|g_olkG)cf|ZbZlXl0@}x4Q=?Xpk(2pS>q%cU5YI@LlR($ zBtC*m+(^-7`t2jwrf8gx!2}6hq6!D&7rc#*T<@r*e?rIBOD2mME}blRjFbDac7$cp z6#eG<`Wf6esF$yx(F93z$cH8>S381=Fj<2~vMkW6xK5*g6Dv9v;z9ARosmKm`{IK2 zg<^fNV4?pJe2-V_47#8S;;b4`@Xpf$a865f#wrI3RSVmHEv=JzN6<8*b77uO877h* zQ9eMnCp6f2k<<>0rP?7EFRS*6t@0virRl=L!Sdp<1LnOQW^a&NS zA9zQiR?~j6YkaRanUm{e?Oyg@@LG8fP1$5~m{aR)khOR`1@vxfFtMcrSi1^e@7t*nE zbczNh=E~^=8uRXfEiS zw$EyUG}jMC^CCf(qpIHNXJt`_g}2vZ9RK4>HpV-(QRSuj;8fFx|!;*dLIOr6{k6^j?~MyH92 zSfYl%SB#oYwgactD{}d+fmOF5Ey{QWw?+9H?c$X-T#%)>0d@|2KfStS13<(2$0Su9 zUcwr+@whb9m>@z;e1mn3E9rrrJd)SBaR7&Ztj#hQ&1p`f1MZXMndH6%a1_2w^7eUj zLK+DA@mVpBt_W&PC^K(Ls5dK0af6a3%JAKi*%Uupz39DS%k%S5S9~&hR;WmuJ?RN4n|iT4K)d8`%m9p$Ki@*gGMp=7 zJ~uwBIfYX$f#VQJB6tH!Nr<7*H~OgBjaV+hugk_1xUgat710A(znPKGq2ivHq1i_Z 
zq{y)Ww#g9ezNk7r$FV>KKK${!JUh96)RjjHB}IPw?ekoa1##Au@iBP9>QaaPaQSqn zcOr7^5+)rMMm8m?;mL#q?P_lUNmCkIse~`Ql;gxldJPeC(cNMKAD(WMi&4T1qS(G7 zqpi-GBt%Wz(^C=_3fqb`o>|Qau_DIu;dF`mN`G1`?t-EP6?aN8-*{@*-*s4dt$?y9 zFQbrN$h}DQlTJsFx0jqo9Q?F|j-tafA)hyxU#7E-oIk6)%|){H(mSIs&(I-b~?+w?MzQqxlg}vK_nz` zo0CaUw({3E2F|1;0tk>$orkKFXd&W@2+YI~<03<$>m^Z|;XF^Wf28~W6D58T;(z?F zf1rXSC=Gsz`@i}nh<*IwY)KUU-{RN*_5b|izy7bm?;PZ#l+b?yqmPpi0UASv!eDik zgdz5b@U<7BJpachO&S%9Yedj;1eFgyqZ}_r!CY`laCVmJgH zz6tS?IDU|mELatUd5C2m!b{YmE}{fQhTV{&Es3!%@xd7?A49whb3u( zT+}P>TY+K{{KWpUGy&}hpw+U4Cql2-Eqka-W5nL_^*#s5_tAeU3Efqgc&|&s#6HS0 zw0BA3`gj+3i-ssgS&m&&u!bm!*TH5{m<&@M(FTQeUwUbp#dB{5gga+RnyQ**AQ&rD>5Wne?1Ql~8o;GepYnw2JPQ2oMhE z`?qiZetNkqHW7c0iQL%h6qF>*Zy@jd#m2Im219xX&ch^;{qvtWHZ z3(6-PB|Fw%w3s&AMCb#c=x{k8#fD3AF0&17q2Sy#;8O*Aw2+Jt)_Q_eft4tbP*%d6 z%jy%H+r~gO2p?@lTm>!zOK7=IaivI)Vts$Q(*rSzXJCG3WfC9K<;oCvFXs7KU9yp7b zd|vOnZ@=qDt`-FmzFL@Lpk*2*)j)g{jF$1?XoY<@M0=dYNfzk`xF--I%sr9L7xw** zef>tiqA>UgJMKDSf;Ri(Il*rb+2Tw;PY5{>SwVkDa=!9^zNFmGl8YU|gUhe20Hu!-)0rcGvSDo|Gc2FDv)ZR=ANO-9n=PUgmoRfNFYDkZE zF@-q+2pW?D5i};pxqE)O2Xj!LOd2UdBq{)^E+GfPNkWoyQ{>pcN94YMs|SXVs&v+Z zJ=%Xr_5ss8mV#h185c$4_yO9|j36qrJ=zEdGlqd0xbPnTXGFZMnZCYu2cH94%9f-6 zZ=b0Uh(J;?4wz?pOiD11R?#FWz$yw@q&Lf$QsnFBW-0_CH&Zdr1MkW@QOJP^qLAb~@qfeuJur_ADfRH| z0?UJ+AdzHpU?RyRIgd~LUqD~=0aeND1IyDe0C;`N#Xy9R3vwQx**{=@8qg_X*+x}> ztxzcd7pPRSoIedMPbnZ4E_5kvV5|1tD8JjO1*k`{7U4X$X%t*C3Nw+STyxDus5*b6 zEkoTOZ7I(8rL?EXwx{n`KhX$!c5JTH58$@PN}@&aB-h{(=bDzl{SDuz5b^ zdKLuZFBBGdae#&L1J`{UuagCIBT|2zgFzDa^&Mp!_4-JM!$YB0gzSiroPNaa&vYq{ zoRo)wECi5*+zs?se!dJs**dXZ*m?~VAcXrK(59};8#qfAf+DnrU?oNASP~F~HBINP z_Dd45)#}U%tF>6SzA(n36s!l#&IE9iEZ77wO|o2If_RNHH{YTZFH~~`x%+?FB^iqI zG|3f?$^n7k-F{zmu;a3OwGhS95Ej(q-C8`Q8PwaNI952xLR~UGPs-|5+9h*W0a~bXW!;V< zN*Br>BqJ`4cW#bJF@N9u9S47#Epd}L+^bGmv_z=ST(pEZC{4q?i{9T{FAOwXasW(= zh}UUWAW@jAg0q2#_19KmvT}2afXeUI1Z|U&H(NNY%01%bL%zDyZ9UiQ;lc5x$k(2{ z0Hi5Tmg9Yh+yS*}OLir5p7oX?cI1_6_}bDgG!o z6FUweshxd%WakYoq}#oH`SS1i%wH=VC|D(v4|bW*;qL;gL(8RKb=UfPV!(Kuuk8C( z8&4t-6X?{i_((Y^h&U}9$Z1`nU@q*6?K$*^_}f)whQ4zTTOoy?;rA@i&XAQ9U1wHu zoYh*)tO6i0QJ`G>f9-$!2cRI^a?KGtpKm_>cGb`;XT@po8q}?KG5q~ll9C`&SSWOq z`=Aoo2pm-7T-SfOqJQ|_wh<^alT1{zZG&T+1s+g9N@-x8FXcI9wsev2^A|_~%IQa7 z=y4@8+2WK1`qKAblF#%R{Q#VmrrQZ)V2JifK@0^+2saiSA-;bK${qh^I2=QZfK0*Y zb)Vs|FcpbPzPVsj8tQnk2T$~Y>5z<>24hoo9JCF_s5C?#-X@_>`{`5-eVm)B$;MO}3)YyjDrOJO-(Hfd%3}D?oibR@Qa@nJ zpnN8lWXMxEDjc##M@Tyhxl3w zDuAuZ0uInYJ>XF~sL8oufBC`xa6_LziN}V*HpyU3OKE?5=C0V`9InEtAtw#!N{5eA zSH5#Yzt<;?d}CJ^nTpI|!vHMeT?_fGSd*YD*=Axs71nu$=WixMbI>?&t5*AzlD9k3Qma!evxY7DHZd;=8p z8=%{{8lb>vWae&w3|q5A17z5wc^jZPjG4OuGHlZ#4Ul=8W^RDM4cfpjDX8U|X>OQg z-pYJ6w{&3C1ORq^1&fiX<*sxyo^RxH|JN7Rm<+&043wUYL(ht;KMVYCY{p z-8}qyF=MnwZ3Qs0>NnP>i>y;R=}zeGQ2sGf!8_86+f)WVbz4d8Ja z(NXRs%1s64gjxFbq#pan4svWA`I86sK%80sP5LNHEwl9O;fLMiZw1Cbv5VGhAD@A>^#`mCzz^dBmI3{F#?CFEgC#FZQ;&CR<#}oe&O#SGxYe&gP~|oA&7%`!^?jm zbPF=713VI=TAU~T^rd`ga*^CgD|OA|Lw{q77YFhU1gNKh28TVVmoOR&%n{4Qz{T#gRr==as=F(Iqrl}F2>_Q|`d)ure8HxH z*R>*WCt!W3RePosSKmi|TRT^5a<7d}C#(wJ#B)HieVaicW zz~MWptKK8=RQ<6zV8~DyC)*BWwCMnv58Ftv=8KVu$drfE&Qi>d=rRzoBieZM?h&~! 
z!g&kZL=S+OZa$QA1aLHL)x&=#Zr||x<-_e9(VHb^C#?N@+xb5smS<>Pgb8w!6nyfA zlMjT$)+qz=&OLp?uF<>NqIeU;n|mCY8BiXzOv9u!$%(tA{}Wp?fDS3{vLDg`8=OO0 zoCo^s5BeWJ1_9=nV|n{@4Xa9_&oY6RWAOO&6&MIkUj>g0{Nko*Hfettlf^Oc5x^X| zB^K8CEy2z0qfg(ynJ5bfjL@JLcV(qnlA;X)Jd=&%4ur#cejwg?W=|fnLvpvWWqF;+NO)|ne20493o2$HR zSwMFC%w$TFb!?_4>kBSa>++Q`)Q(`-oZb=bJmWX{d)n2|!I;_MUo4L+Umd+Pe}>C@ zh1n7x6rLS_&KHv7EK|5?YhiYlx*)hbu7REJ$JUM(Ir~3ZpPzp<;_&F^&ZzPuD;ia| z_M8mj_p$Sa>)gm$qs8TlEw*fpm`@wwJPbVSCTkb=ppzMm%8M2MVYzqs9CH;jqg6!s z#%@GN_II#D>^%G%SW{M9N_VSUX~|iZ_#j^6%oROlp0j+iKSwJ95}=S@1JCv%@FSV< z*_DtWcf)|)Y(;kvR z_f?6?GBz@$D=bCajd zQnx3<*@pI%X%{W<_+?SB*=9^UXgtOgIQQ(~b^VOS19&mR6w}mg;@ez#sP_g6$cS{% zh>Xa0_y%DbhMOe2CHUjN%i>`mDkrRyGK+oIb3FU3>sQMlLjRv$@`rP{dn z2M*I^i0XeEl(kkEbfu`rsw>BNLj;J3N`j+P$6IR0RVnmSE$e~7n04mT*&5=m+naJ zQ5z)583Shpedc;$5@WaKJ|7$SDYm(Z{39fr$v=NWwo|R67vBvU(N#2~O^zrORUtvE zupKh^Ja*DvI77SNbuI^-V9-#R(I5`h)y)e(`}|TE3*5j=velT8Iwy(9Tcvl*h?;fX z7{YeTB+WHvXFOvJ<5?pZ_v2R{4#(b}>YS>mYdC<#@9g+lGc8~#+DxQM;dNksGBBf&@xT0aVZi@mCc)UUGZcZHG4Jn2J-@r~ z&G>x>>~nTgmKm=L$!7d6WDh-m&7T|b{m_3L06PLBwS8cywhs=~_P23PQ0(Dv82tv+i38#Sy1-uj74H+Fbbweg~yEnv0|NM zwC7DQm5~-^g3wW6DjFRn-FA(wvtxfT5|o3+fmj7Qgc*}mPT zD9kjRIcbxUG3=*N0@P0`g+V#G6xV;PXtb7pp2VHVnMX-Nwqi+zIITR>I+E;=JatbV zlIT41KX8{`b-@+fgttJ4n#RG_2#_C>&KUboySE#=$`3#6nQFZb*7=-g87~p4c~PIIjesF?(_A;8sl|7)9+Uue3d{?r&q)lZEf>ZXDY8xJ0=^>;bYRpW|aE4PK^mMSz8p1 zDEIM&$!4R`!yXpGwp2KmcjcBhJ>NC#0DeAE94KEOk`L$gLmHeX z@rU}f;H%m&fIv*KYob9q8|Q!Yw#@MjYV79ry&J?Kzg<^-UA5xIG|u29VTcHJ=~Mbc zKVOO>&Pi5yq~6IOPQntEn~&GaVEIAL7rxUB=O@uJ4C~;-&G4EZWx)}8nVaj@AE!6SktD3>8($Imypm)3jiXyt{x6VL(8YswBV z%_2k=V_*l+_+@EY6os&QpPji!f8}OHybMN)XohGzWf=^9z|n%OPKKK<)8fuJd@=n} zEd6WO=jSNTg#Sd{Qk{Pff3YyhhS)=bZ9xf%JtRvzg`bo%bFG$S-d62w|0bpUFuY0Y ztk$1?Q@xL=h0qoQ#^Zdx`Sja8m!P&mZ33Vo*;V5)xqJeOv_o3B~a|A>?ABP4#{4%@)IZ+|4fo}St7A* z_dUqV`A&^o-WR2n)%(I6Dj`@0w9l0W-fM^TQG2 z&v1MWtYro`15!3I?4Oly^MVfCN)M~QgqW9`7d9NFWPcvzjq~>Z#uo>i50f(%C?U)B zI*74YF3P)#H?6ZK)w+xaaaB3*i|~#@qRb+umoJZ#d=?-r_ssUsVdA}(hp}I`EKqaj zF$1QP=lf`tgn`G_UV1tszpfxHZ+1nEASzdThm*+`CM+W~vKUku{b9Oa62e2FSMX(u zVsAzi#OxyOg^}93pccc}V(-=U)*D`q-c*wt7b60_)003KDPSK;R?}0m1O?%mCZKr zeH^@gqFYJ@a7bbs&;?2{57&#w<`xbOfBX(pN zzoMS!*4aayr;mJ7lBQHzq_O2abtxZs%X8C!v&E~*T5uYMq&NQnr&60Hx z8dj1O@l4eP8)vaf5XU=rhcffZ=228c^xwVPX}_S=Nu5wCc>-y9!YDgxOYZ9FQco+P zhE9Knt&fqpH2S!91+`8!$;ehI1gnDJnf0eALs9OAbbvBD#ufc*!p{d0;pDWt`48^D%$L zQU~AxDK1Zz^OrOIohD1(^JM_aQ`AUyQFRjL(#+;gq9PWpJNNF(UtjK|yUCy+-yeT0p?LZ4hcAnvh;H3Ol_|~U37L}*NFllr+|fHFZ1pWjj0Bvq$t#P|QxspS-KJqfw+(GgqW_?rP{C3u_Gf|!?L`M(KD%m9%zYOz!I@ev*V8X{ zP}8w%>Hl)|Ior4GCb7GHUj&NHBj6CFjDiFaUIKy-@sjK{Pm#9>0BudGxGbTbjj+VE z8E%fTzZe3h11-$o2`z;n&Psn-B{HX^03$4#5p&uChnPt0vrtyfKeEMmUb(YNqQhOK z{iRMT(+m$ai`Ud`j!7-UaSR##*Q`xqo3ZI*>o^=2r-{eb5ynzOP19H_oRj9rD7&!o zYWb8kSXf+T!e2vq`-RXRGVvm6!7N^0bjH1N`HnLDlA&N3D8Cgbh01@gom{E@YYT8Z zcA$JlKjpe8jU-4a=c)rJ7oZ7z>R`95M;~E7Km_xXTJwL!$$Nz~v0BVGEoKGFBY+r5X zd;WTD8CcU2UHTHsQF1koc(X#Irvz@}Ds~3{mApj+_Ms(VgambLCEl1y-y>_Cs-*M&LEq%AK=K8mp4+ zG2(9&ke8b?F6S>de~*$*?f4s(b!ye$p|n#o&N3+fZ$LhyJn+t{J?}%GTlpUFpLMw& zSaV+Gd4PYP^ABvO%XUt&{-^nDeSb`}AiS z0=`(h0?mP5jm|TN2BD@Ky7B zh<4}zGB_)6`!gFhA1k?^&XoJ&+_-O9jUjW{hegaz$p2DJ5$2Zr{vpXp%Zfj%bF!Cq zd)t3~YBh2tL8lI`Wbj*I08p41Fdly5;6pPLtrW6@X!=Y}vj9!G0GPxJ;vEVDA7UcR zC}!?%!eoU)YYAU5PTdIQueQ4~LLY9aI-B^wAo=0;v27VR|))=^j zP1Xpejg8g_rp?V(y~n-fOVw9pfNacPkVINA<>p(>DOf56WCoVXamt)^|Hq|%C!>GI zr3>WT!{l#htZ$ZJ#X%ea)UO7ub;2_gkWe;Im{g3H){YtK=RrL zP*(eh#F(z~)dxg2`_T7MpUPRW!{XdL_=$lc2?5#5Xx2Dyz-5p7GEDnixH>&WrzfGN zp8ccqCRqs3DttnO?tA1CMEf;zi$;IEe5eo1RLjT*xO9O)=0}XNm3zUuBTGA3aqPQv 
zc#+ykC#GvG24s^oxvgkMCS{D_Ld&Yr)?9nRV@5^PbA4-b|0mzAnuG(Dr^!AK1dqO&`(D9!{UqwrLu3oP zMMFqCJ4QoJ+qy<$g!XriMyRdp9t|1o>mZFVTHHk%(%RTb8l$wVn>2!GV@GL-X?s_x zS$$P^l$4h`0fGe{$g9G@1A2c~75lwCd`FsE}_3`9ONg9+TYn1Q6_!)`$E z1_cDOsR*qrE<-d+3(N?pjYVbz)3!o0f@oc_;R}}7SPP)xt7U<-ezmmPkJUw{P$hem z7akP4oX>>~TI%P52CD<+0tT)TV6gcv1hAgD4vF3tgc3k^2SUTbqPTxe;`<^b!4@O` zk{yo7d;?S`DX+4hjOeguF5K+e48-O6P7r(c=7VOJ7q%pv6*u5Kj4rXbVo&Ub70+C< zPxVeVgs7LQBN17Bt6xF2r9sqGZF#+A>UZ=JP-S(9S{^q`HvCUJfTh|kVIZz-#MPc` z9zl6>oLL$+6L)8`NojuoPqw8&xUwy;@@4C`Zy;YDqBesI+_|3_9>hZ?&Ta38#OYIW$q(Lkr?{ zVj)*<1Cy8vv>=byAXaAbhR&K?-XK|)&HG@>=ks2>w{&hF;@E$e=SLXr$oV5+Hs$|= zc*~c9XFhrH_2I>aGSFFZp}8kqhlg@#Yz$|qb^4ktJ?|MjVfAqIA>_1w&5|O$qiU1I z0UQ*jVt!q@oPeRw?=H=rQNAD!d+kw)60W)^0hmtMU4|B}At_Xo+#|(IyT^;p40Jw6juHKR^eB1+kVJlCUg}cDbz&Wt-Dj zEzx{Wv`(ai^vy?AL~9{ai& zO%jIKE3e*mgE%cnz9dig8t^44hFe4Ip`n$|l0ah332hu`toYI+t6;2f4oSpn9D7KX zc1xCc^tp>--?gPGu9)m0Deow>2WXF?&;lI#kXC|m67&4*<25QmvLsNE`iQVMBN>Vo zYO3C8QPO`HeDUNu-6}3Xw{s^bw7YmM7H3W60Q4aakR3v*O2{NPupdj9%YN=$dLI3Y zx0fEF^_3mf@)^h8HbU9!Rc#Ttj%zwL3Br(dWMY$Q5^^1Q#E9%KA8C>igYbVmd^afl zHfC=UupiJm?2ea|f5pjri2V(22j?!ybAuFo&!m4D$qQI>|LGJ^`l7>Qc`VI0GG|Ky?$YD>VtUmSfJQs?eegHVXH5V z$mZ<9PhR9M-%*AuqYT>fjKlR3W&Y*nf@MCF8gNX%-Gs@z8SyL|7`H)fW?4$d4?b|J z=8J#L*uo~9r+4w#uVwmp?DtMiGTJ0>_L6FuPtNgAJ`=X?DTS zEk8_(;VGVpASA`AKA=g}9Y7O1FDV8vy@xTbpN?IpjvU!fC)>q-%>NfA$gj?_ahnG+ zI$L6X>4^d3;VYf4>DRj&@3hNh=q6(6dkBBRIW50v`vk}x<=c|3eC-*E!QfG0A0%3`N9+_u}YliisJw*MwWoa@miFfQf{@{}y zC@53DUw!|U?p3nRArvKsC-lBN1FZxzU!yRA(X{I1EBbtH7M%>LmpAKh+}>P1efnQ2 zhU?i}XxL(_r z!p-g7%a^BT1%owgb(FoVqg1?mY0BV?s(-6j4ohl(yjw|uM5s;wBSy}L_1w36%Xn`I)2TYtm>Yr<`y;_oJ{@zMJ>AUIEv#|{M+nQ193&7qV2 zh^cA1F2i6G^A14ST4{CyG+L|H=nduA297J*h_k_vEiC0Ei!C@WH&&2fc04WX$6Ci@ z1C6+`!w!^)^^C1Rq0tfJA4~E~!($1}OY%7>Vn2vCpOcRTSycCxLvx5NEfH3QsYbFT zRvhRX{e(iBT=deD(<>qj1M+?B+tt}>O`-pllL0Iye@`g(lgOSSaf`{(_?g56 zg*Rc4E{-FrLZA7l+iA6ltjy|Gve}f=+gR2>{e?3Uc*|#Z@&1y9;!L^)F4TCbtJ?;pvZt#*)c~m*g_b(0);ZZ`Yi`ip*<8mbDb~)2f04lS?fef1hGu?GxVM55vA}t8(ImcJem?aZ{G%X(5=|R!8WqG3E!z z_t7c|=TXf5C%-;eRh?UqAe65IcrulWfG2d=->3uKssfTVaiIRFUcP0XR}59-Hr?26 zT3L&EYLX;juAr5tQ`g9ky#KO8A?u~5Q@1S~1(4CasMhug_fo|be~nj zy%JeZcyf}Ll` zd%SYhL^rK$Rnbjdf0ecQFocx$4@8^zh+PZl=3bI5gt7j#q;~O?df~%O=*ZRG@ciiV z44Kb_wEiTQ2iKoufAih?lX70`Dm$e`HJEZ*K!YZyI<6LrxpY+dE^+dtyf&55sM?^k z{O8j1=nuTTR0m?HQAU**Jh~wr;!C&Jnwr@_auLOO{aSJct!!7_eVvgEN0oKPqRR(y z0H|SIH=kxu@)gIJ{@^7^|23~R=XIw1(bgFk5&dD7(LUlrf7)j)8h6Rm^9Z!nII|n( z$sV3)`j%Q2V+!=PRZkyQR_4V%0FS9ATeVH$WJ&iL*vD!a3x4pH- zne8n%4b9ZKe=4=j=iIKsnSJ{-PaDS6I8i=VclGJ})eJ`QwWYHR8e1zi>3I15@?ln; zv+J9!=nZtvT>4r|0_T|z=qj9TN}#89{#k($fLX`Soolz|DNpPbW~-|?lSa`~m{nz` z{hUE(bo>3!In;9DLix#K8muRP%{$8S; z%2@}~*J~cscR&%_12feW_S@+uuwOeXlw((3Ns~-jpP2ctG*8I>M-(5^_{#QlDwx3Fh#}yybNb1g@f}Q=ulGgT?(W_O z3>z38FJElHct$(ybCBg|ygHA%IsIYTd6j7Ef51p^PJ#&03U+Cbb(Z|Zv74@z25I^s z)tcQ!DCo#^>-u=TxH~w=!Vsc+yf6emM?Pn@N7L>u!O=ok+9f&d+bzvNvc1bf6T};r z=aZ#Cb|?(k-QO;XeK&~LIK#1r>8G?HZc>o)mY_w^L}`v?Zf4L#VRLIL{nRF%Y!|gj zf9c#le39>wA5cA2XE~p!)*S{>eY2F!X4NLE|FFFxJ3KKX)xV#df$9{J3y()Nol47{oUx7B?3!MKTIx^bJh}WVy{1}|`qC^} zvzrh%6N_p6`^m+$PT{2Nq~pwyo8W2`e~RS4$WC(|(k~8P&ZH)~roc?3;4?`u9%+fv z_s$3xzb`W*5dJ!Ws=z4zfpA^<4~XZ=f0*MQs^2}-q?(Q!f=(pV^OdfD%70+|q3i?t zoHl`~J`kzZeIU{|K6t8W6nrp*nMlzZ9>CN<1A+SW&_Tv>a`1-!1)@fpgs#l2e^=s$ zY`SIe5MkS?Amwuk`blQ~Nhf!f0(5d?kD)BO>bdR+(qxu|x5qA=e|iET{B?j4%zqP* zm07C~a1O0M0D83k@X*2q@#Pq;GA29sC5Q7Q7YFG^F3|BZ%x2tpH_J=nb+DN{+ERg@ zOsk(2A+jD^R@2H_?M4Nx?i&#be^LJ(VFjxLC!%XH>u10*N{CE(j8;FRPNmku)vO!G z7`5xigPc_zN!9>0)`z2wRBU=yDMaE^(K(FSKO{=i;AWZu=mV!stACUO_w zx06iHgY868YjqE48&{K)jOO0W)Rv<9`D8Py1P+ratAzbTO_Os;|MybFfB3m&-{~6f 
zpymlG-6;6LzVuS=j>^6aHRJnIxHInAvxn-DrfZIIo&a2-eDd)-wN9Q$-Wfa@Te2N^ z8U_!PpVYd?d4OVhBSktDJ4H%rs`&6dB|+4xq@=7JQpl>;a<;nq7nE%V%$i(3>nNo?j2ve>li4#3f2-CPjaVNdI)Q>ewy4NeT+pPHcR-WY`BHz>Fn*ln z1jH7-zq#TdC>guUWb#Ir37tPKe>}Q>U1*p)f^b52xm8-mPMSpCMw*BnxuIcOPwi=a z3LzA!l&oU`qz3B6Z>ax-;*4JuBWtEeC2yukS6RZVwV)(f1?Bv2DKk zp1$l;Y$utGRL@b8knJOzaIyzVIO%)lTSQ}zcYfetvnAi-aBoZ4s^PkNBf})_ z8LV5A0W}!`J(C+XJ%81&@|4TokZTeU`+q?8YCb6wt2(1h?p!gOpUQJ^l$Ga!J*x> zG^8mVD%SNAuexM&V%1qC|D~6Fa^-nQzw<@>%WKhT<#q~-286?o_xm*coWD76F6_ej z-;3hZikA&iRa`$!F1rRHE^L7KbxDW~zsG|%GKxGm^RAg)ir7LG&?z;N}D|0K8{cc0b+4H6b7qIW~ti=u9iRp zjP3czt1kMKCRl~s5c1vKy?xZ>1#|$pfJy=jKb6&XB8=O5fJ$UJbv{&i7P6pxCZu&{ z?`u|V-wjbg8KFvsCMKa}$`g~IdJ%33(hYA*lv5uf1b;)u#ewsQb7n8aKw2xRM_C~t z#e5+BKOf~<4*OW(soQ6kLVPZ=jh&D>ozFvbQ2oufA9WyS)dAzQG)FU?FQ32`4t9F!D zxU|tyvV9M7?1GfGzBqswb_KF@V7x-Vk1dKCHmYaf&ju3W2KQ zzBui?-xugSK37j5s^eq4nxOrNk9k@v^Z@J%d;JGAN}K) z6^HecC^hh&`5;idaj}z-FE0v1x4MZA`{mIhxq6{6OY4hudTx7wEqfdaII>>p)h)Lj zSAW%W5H60%X*MIe2 zuIL}Wx2-o3&Pm1tSk0I1Pd?*N%8pXBnP`>5oifL}$oJXq)RyWuCNeZl}~01-k7zJW)?ZFT4>jB@29WJ zZ~%MSoE!R}?EGBD{!CqUw@>m^|k#V+595EY^EQ1xwz^q@+E!S`G~WH7Pwwf18AE!=aA=Et%zwlrIUU@tV&ehr zXqzFP9eq5U*Y%Un-)0NvL9@-;yV|GJruLvkYl)4ucCuw`yl>*tVIlog#N_X5Y+Rq7 z9HI1YZYDd}RM;BCY4JA>>JEj0UlmkgZ2q^lw!pavZ2e~9Z)j>TYl9lh+@J>i2K9E4 z1~r&3nu|85bK1FhgMT`wu?se+vzxhSgF2_R%QvVqTf1a~YSz?^;Uz}3k{Oy$riQng zUuRlMG-@2QRpM+9C8zN0NP&e&ZHV=S@t%f z&TMQ)6qb8j^I3vg?IOmJavmhpJ zT;7FhZqHv&nSa^H4b(Ze{K*4*a393?=;K>yv7rbaH5ky+DA{2*e=9KliCwg&e{xOl ztU935Y(x(zb?)i&XYu%wJ)LW()dVL1T2B+#HMq&pw)L2eXIqo=kX5ZAGQ1t*kb!`f zWtis;+I0%}UAnCB8*LFs6`GCSs7mLFKYb}5&bB=Gq<{UOhVv<)d6s$)l$j1{PaO>? zdr~j>o+p?`Yz8zoPs4z?bd;0v*AS`Mt>i;(W zjB+E>0Hfvr*ujpQH*@S9`Q41?nPg|3@qzLhy<*jiCnwr4&@`u2&JBNZLm%+tWWT2- z3;t<)feq#FhD}F~D={B5uBMFFU+gn~*|Mf14u9jZ8dwNsTD=2^cDRe!X9w~&2gjKU z+)=eALwQtjy$S27`eSou-yFf7>=hI4m^J7#hk7Ky3&quVG$TZ!B3z(2A5m;N%12c5 zh~Fb}UxW)5>WRKFBCkRB*NQ zj(&7gMCjAloC$qRb6VwmWv^Wj!TfasTZ;d9{rJ#W44mIi8w_MAEiN1E`b8QB9zVI( zU?v>XWiEze+GwYYKD@4<(HNSn`KK5SWG!EH!Sl-AbU_?ZWiE;%${fC|g@*1X$$xGM z{`l{*7@Cgc3A@z|W}g9r#Xf@ugnbsTQ7>Z@?m<1=hlQZ5UiSWh!_+p``dS|@5+v$f zkwB5}3I*$$^jP5o7ddcQrze42p z>4rjENQbwyk=$Jvk5{E=hNxAlhJUfU;(z(;!gS2Wj5T28ZjPw!=84#DG+Mh0e}-7? zOvZ_w#xg^sgqj&*B@_*#blGYoy>9to;U4~0?yoQzuE1#X zCElMH50_9gL&$`pVc70#J59!FXeP9MX_ACNyfGfIQqK$#D>V&6_RyGWGJj?xmI9er zz0h(E4n^8J%mu5h#i=GO9% zE@z-&%UEUcw=pc9BLl|sWWR;R{Kt%ix6-fphQ%{m8g z+)Q};)=5SOLj+TSX)#0KI)D0RiC{-#w*_Y}j@Vz!2lAlV@b(%Ue2yuaC5H14Ku?(p zZ==aU=%}VyBDcgq^rW@=X0miqVxoUpN;VRlQg*izgnK)B(hwC3NVT2|@J(q?CXlat zQXO`49!;HP8191YK6y{aJeD}@Cc>+ZVY*pI!Y&AdLC{o zdPyYIz+To$2=mHSaAC69K(&&?YM`ycP?)+($^#1T8mpjVbJ!>dYcfn9Y;bEuvlwO? 
ziO(Trh9Y%Ho%1CAP_?4|Rc$hK@TOR-Vk-N8K=vR}DrEr$p_JyZgHZQ$$Q1I!-GC0G zA3@b&^pY?{1iNJI(tm`0ZYyP+ldSNFTd|2#{zawj)^x9@Y*H&6uZgVcHB+5 zEZkjVT1x%EBerr9`&~)Isc~*n5cW@f2S;u%Fs<@%SYkt7_GSQ8{L)?Rxlirn3~2BF zH6?7`EJDOu#vJWvX&14#8kMlhi3UB#>^j0Cwi7c&S&rLIwSN;c!^m%DBW&br+I;Js zdH4)ZTUq<3QQF7Jw>>PLLwQR1A%;+>Fq#lP;8~yYD{HO=s5AJ8AXW7&t)6CwP4isS z4|fUj6=rtFIphF{w|zztyUSt$_18rd?U@5|OcqwJ@M2#d;Fcx3!88Voxc`=QHdY~C zo?y8w_)Hr#0W>np&DwvM1t?sOfe!Fc5wf!)_%ww|OP~c&cREz6L7rH>9^}cL`-(up z^8)}W%`f%iuVJ!6;Z+i^gUu2s?mN3w2xqxgBx)&|A*to?MoK$AqgZM@|Dsh!ZWBrbeNv7+m)>3ySux$6UmXg2YAbtdD}7v+B&XSJpbnhob9i0YXY ztI*b2@)O5yy0Qr~z0N*glZ;(g(Tl>XG=xIR##;K z*+#jPk|vq5K8LOCckAz}>bp{)XzGnkQ(w1D$C}8qPJb`2gcBK&ExVt79)2IEmR9CJ zAx+&|A=-b_hVDI>zS*Tu)JTA!;FBQ$C`=3(PYpG#j*!5T=b?rK>wtD<)4+QTA)?lk z2;#M#Oy`0B^Cim?p7ZgChf^&_9)Sj;rG2k+;r%mCEhm;*_;mzIawXU)BIoh?@s2%H zY#fe}sUmqHMzJgGa^G>V*%FYd2Mo|p-uaC6b}fH9T|EaK8`gT;AoMdF+ecPrf9tr| zvVY%VzvV?rAC`qedYi%=l)OmbC?$IvO9iKEp0;)m+6rr9N#S5(2xu*s*vZE(B=9X{ ztnZDshpV3L6K$=#O_JB#F2o|`Z^A4n5>fEMdW0GFf?VPNi#|zDjCzNzC&<~NaNT^Z z?nHls!W@@wn%?(~N_XKyne$W5-L;MZM^G7V1 zYQl$T$d!Ftrq^&{DS>{_hKN2uIGp=`-vu#7%3&JkQ3BDOd<0_A8hzIcnq%)Sjq@eM z(M|n2Nyj(JZCMxSDJf#MYUjROX<`9VeR`9&MiPGl>s5q}1oiyg$Z)(Q!vH8u3>c5| zSKa!k(Cm?CZ_GSAnNs!Xh#Wvu6AKz&ACeXF>g{A@detFyqM1z5`h=~2|uG?yBj z0@NX6pKKzNT$6tUNP2nxX)4FdFq^aG58XV)o|u^T>RsR3Zj@VkHVyY9c&pzHm9G-` zkFCJYqa-0)y^&s9hI+i)QuJ$;4iiAVGERTT&w$#Ax2WuFG8WtaKK8=qQ^A zdHc&V|H6mWx?}4}+}gnGHnyseLq#8uRkL32plH|ZV`R=HrMV2#ErJL$OnuneAr=O! z3`v~AJ||su__&8HgnNDzFAnrx^Oz)jVZUnCHx?q6&eK^{XA*B!1flPGVjHE;H(`H; z^_ge@6(KOLM-%1rf7nvI^~NQM`=;AHG|I@%3UL`J+xBcVD&MRqx@Sr1<^+-4PEZ^t z#b)b9I7%pHl3|-O={m{YQRY`K+UOodf5Glif%r zW=vRhH_w=a%`I>5%O%J8n=L&%+8GW!%~`>g42>C~mK1=w-!>D(-0vuz=3AxHka~1_ zZWdL^#?IIkN1uKU6YsTLlXH!e zX-O=9A8BECCSdalK2FKjV!Xg3F0vkztzl=E`4m)SW;<+uDoL!}Sa0(4L57+AG6*fM z_OjQhdB7&RnuAgorz|(9CUL0TgKCLT?>M!DI81lOcFsT$)xv<;_zQc!zel%ce3DiR zlh@_|ZzPRSzQ2Kl`27tixbJVy<&Q_ufOgq`6reUK0r3)(1LGzp$@%*5@(AFcK9vp* zVA!CrY-D=6{9&oL)>MW#Q6z_{s0@H<9us4NF38;wqqSX7VY2CTA4~7Cw|%6U53!4x zutj7p`{)vsv6geiW@+rnjN3PmrhtlpX$hzxXBChXd9P4hr8sat=YjqYq_#UisfgEq zOjJO4Pe_Avo{;DK@%SiCtB|^2P99c|@=;tdWhh^V{}Z;-c#Y3UA{p~(N%oR0?K z;Zhu&e@lT?1q75qM=)Xd?lD0Ux2!CG&gr0!AlKNWK0oj#ON!LZ%e^pXgB{au&3QSR zb;5#pb5_iJMUkDv?KuIEnzj-MSl3n}S+@Xi4&xxMHYdR04)cJfh-1#_WMN@#mm!89jQ2%oDVgTU`R2f0zLcY5R$_E^Nt#3aGiH4u?Dwhih-%CsUYX+SXCd%7 zqTU*6vut`Rs7fn_Goj6{NCIZ{rL)X#Q{7de+A8sty>2l zwldnA6B|0kJL%^|)!hznjt*#Fxk*6CecI+Y*-2UDb5w2mv6(U7N0uXh+Dgc`x9eW* zWSX!1%e(HLT*I8~o%KIgft6KbbqJ!xm1A{$xy6X(ndDv=q;}#h+RecqX(D;)lt>~u zCK8FnX|C!zTgkQcw@jDt`f(5?0=Mo0x!v1AX2Gh~(h}J#(%K0ngV9dflnGZLYbgU?rk|G!<8!^Zm_oLUt#-m0)~zwv}Y;#krMWy-VUOg)DDuvXhQi z*#167QMKpNV$Sx~P&+X*VredHpDbi4q4mKf*t*88b$**-pQV(b)mbTJIEu%~odl96 z;gjHDSYE}TaMO(;CRRWANx}AwiHvRmpu$F-j&h}~IU@vYhqC;C7MBw!(hSw+_r*!B zMWf7L!Y&ST^YHuP3~X=j;t+p4F3(V2mAgC#>wdTI+Flmtd&-VUQrRX44D$5yZE=Ry zy$8#)G>7di&hqkQ`SQyzreFu=ElyH(ld1!O>1bN~CD-YVz;O9^jf#-Cb&ITaua-eF zHrZtx(@5viBDivYp20RQHYaE=lkVUEmWxkQ+U6`aLungRe{%9X`0UDT(T8t0H8y1_wvxVtBd2>k-2k~ZEB({EC_tj@3LuVeSk)pDHU;@x&lJ<>{Y96U1 z==h_O!E((GXId5W7>_*2I*3uoroX-J&56rW#hA8aC{4qE{qijIH*Brz@*s+rmTHl( zwLfyZf_VzbGQ!jFd36_g9uCE{kAm912oMhEvibbP$JL@@ojSxnji$oH9$a>vwJ6Zl z!=HBX{CRz870@4+Iq2V(@$d&ezh9zX3rN&d1V|8i0Ri0`yU#g-=lbn=M zSqh<QjGP~^5@eVqB4PVg~ISND(|?NFbWw_bV@_{jQZ&w z#k8}L8B?{EIUhn^eEH6@{+m?iSw>ym8S1`&vy^&m*pkwHSty~?4)aXB zFKP{x*j!a^^0x(uG4xBH4O|f>S>WZi;It+LtU;oKv;~Ny&Ppris^!i{YwK~|LVjz) z#hd@8qO+A;TfgINvG*!DA2fwV5+jL!;}5rdz)pZIT{9;RSzj;;m57u{)uK@*rvEa- z*XgepZp9&Q{s@UQLJ*VR=SN@@ZR!E2Q!B=xPVe;2iIO|MZ~~MZ4XYGmS~%SZrD-{( 
zxEx9QN^o9clQdtBsC$^w!hsTR(0IIFiag~9z-~%TfF@(HXB+v*GSUnequ?x`^lZ}<+Z*3ebt^%wfz{06he90YV||x zEA=hFaQNjl?9kWNL$^qCLi>x+Y*3rc`+1I=8pD*v+=aRPrt;I_fczU;)m7c zq@0U3tPF(#Vr!K%=?=G|LJx&r5whvvD4(gEiCNTFym#>jb?i(^lV}yhxcky~7PWjK zRkHdI_DDXT(t<`br>gURvQ09t)yFt>H<-+*EuT+YeayG$&CH~hEm!9?j*AG%@)@gpeS@GpZ$0J3@f;G5>n`ZXUHP2Zwx>|4iAaJYOqeJJ4d{ zu7kN8T%BRcCzn}1o=+{?a7qUr$=b~m?-kGGYNk3#-Eg+nlqt-AXX}{rjePua!MU67 zMsw~eOhoBu73aWld$av~dPVb=vx8Jnxfsv9Vm?j4TO2-EPhV$hacGyfTxa5LP&$v! zGz;RHORG>hV-r_Sz|5nJZB(Hxj88L8jdS&h>7$(p{hEPUGSqS=26G`)3nNof|=@^a?AZE7r#~8 z?=NQ7ydleJ#!IOV$#Ry_eegA--QXAXDcDk8mgesLfGIGqldp znBeg8*9TmDI34m)%FecnK4RMTwDx)YBPag<@e}VK|8>k?wu8wz!as&ql?j|y0!cdd ze_g~zh6E?i6%$~Sk5WtPlwK9!!*WhRS1FxVN)_N&6EuLMYRJ( z+mg001w%w~RXg(f>Ky<(^6ou8vUafKYIfvl7(k7iTCx8mzGVH3;#3KSGv&v2ow>E2Np$_CsCT^vi z050B#ufybB1-GC26lC-JDQN4`7n=;FTerNjPpz;Wmvs`mOzreiYZ$>VSV#&Y?)`%_ zX-u-Jfn8bh=4SXk3z(Z%>rcP&|5dU9EIuV%(&$Q@!z6z>ZE?DGw^l!UC})e8#Lnpq z+zBT%X?f(JY+d(2J3-r24TCl~##!L~Qsx{N`952vyb4fGKSG*1MU$!%O4-i1Y(V#f zXisYwlWu8ifO3bB3F!umUiTRe3!|RsDgkpwB^J;hF^XA!6RQ$U`eZ~AlwUfc0ezE( z$iv$t^f7-wyNaJ2Y0^c9^+9=ROap+!{D5uC^O-!7Ay4&Hu(BFm_lyXJ@JbOydz0w15o5((jhBF9zjlT6P$*BN>@X}= z`KW)cxKz~w;*XA8aF_7S`t$gurwZ0NwQwj8mA!s(b118mj5+9lCJ_G&WCOUTeAh#z z8yy87RHN&d5y3!SNke|&c%GZ`M^6ESCu)%ZJ}_W4gqH2@>fN%g0rCC$Im~hKbzOFCY5&;~LVeZ`O zT1uYtj8u$A6zRHTR5XZhViaM+Q-KGR==-E67Qi2TJ)ldwDj6U8rDopk9@3)gk)d1& zmyj$-UHn!AI~3xw<~CA(j%m_%&8TcJ_Y`r#CJPL^W~c?mEp53FE};Vk0vn*LqTw4X)*Amjxkymkl%kHqke7SMe%GK#dEa!53YZ>j?1Gq-y=HshFaE&5*R%hh#B7RM$w;f%}H zGQTBF*XW$~2G6-;CjbPeJc!}5PWfZNs= ztz)Ls2m`Sg`pwmtWPN)z-%&MCxwpj;GGlOCw!X(10n6Rv2bL&Z5Aahi4mTdHfDrZvq80ZF_LHqh3Mir0x`Pau@_ zTO6jE^Dk4ygIF&FLRrrje&@!lZ;J=9UI+xXzM9<=-%UGXOdFtYoUVU-U{`g&Yww`b z&_mET4Qj`6N7zLz?B~gV;a)Am?L%!1n5zr)f<+Ix%3{ zP_|AO+tPH5?tF#s`X3UnZ2C|jp!Kmy+hbVEV>r8w^wN&00P+DuPi-J^XVwclXbH z=txvFvy>rYR{N-YSM&1Gi9#nEcSww~eSEoF24`AZo|;ucf{a1cVi=fJxCf+7HfE4s z$`p3i2(6o=vf#6W;%I`5-OQ((1t-a#Ze~}S)03uSv?#dEQ|-R?bOp-hkBQXr0i?NF zmgyP6S2cynW}|=GAWjjj9K5bD9g8zeA8c@I0lE5(tBOjLy>LjZj6bv&8!0R^n!3O! 
z$-Hf;Mf+Baqq0ttH>suXyl5%jqW3p9!bJ7-pktzBOlT#Rv_O+umQ<2Xyw&5!k*e>p zJ}FPe(|rlL21SVd&q{$f5O)qJRau;q?h79(pj$F7&vVd%()+ZUxL&*;!RzGV# zO`=<~o4S8=2JRCion{_OI|*NjbQ;to{mQpx9+7a`T~qJJtd*3vT$@ zS2*^z5z1a=7<-8h5!F9CqQ9KSWpg45tJDvrK13vb@3ekD5sehFLe2x{UE0{h2piG$9G}UPs+qc#ZBRF3ve0%D%L=lpv0{L zTOWVY1LOL>IBC~60#vL~E145yN!uY;Vz+6c%3LR%f=GWXL6jr4lc!G0naJw2Q%(d= zvm&Ny$|8(ZxktrJ!4?UtDg?<<>%mGg%AtuFs5}hlke;R!pY(VSi+S4CH(jP2piF3Fso20$s~YhEXTyh%2_Lc+@PA;6 z@IaJ|1tPIZ0dCaFt#+6uOU;I(>m80M4ioRyjZppyWl2w-Q3mlLgLy-ywwS_2JmL(hhryW#nHIS%cS0h8stPJzJkX&}^J&;{P zkqU;jg~(eb|HkB~6iNP*Z&3QmGQ82PtQrv#uq=@rDLdBEFGBswwJ%75x=AX>PS!5a zY{x*71br*59KOmMk|Q!CkSDx3=_`MFN3pN#<(3QySe{A+7Q(*+NwVM*d@cW`ddl1B zlW(MBlF8lV(n)eeG5gU3%{grk2;S^f5V~nkusGGPWuWq4lcYq8tu10Iq9YQ?kdhQ@ zVb<}Opoz!DtO=j$+ehw}ariIWW(l`+Mv9243W z;W8)EZ%U>dka7~ArE}WpCL@1)iI?aj52Z{)Zl{}!tWB!`D$Q6#(O%!LBVs1FujZ^8r%|)5=86@yfjNZ`d1ygftac8Y$cSSCdn&~gP+n1UQf%J z%H2*W0}hde6?5*C3@br(O!pBD<;er95KA4VHb{2JYVkvd{cswE^rboDX(#ZUx*n$# z%v-Mzt#If@C{-XE6L5b5ubX5dBS{jusr>_qr?By#pXS2sesM&wo4*xu`#6pWIKkIV za+-1dNr9_Mbb@95>9l@%ZE);KQY#y&#utIYts5WNH8O7z!&@A)%^M-RBN(R5p7(;aln3>)0lip zT$Qk&_yk<@^CMiz>OXAnyWBFdUZO)B_0Nvzr-KwaR+d}=)k}0pfBmyb`cstUIQvDR z&>fd?lFkvSoYQ{=aG;2n5J2#Q@+KS6lAvo)`45ymZE;HGh<{|P0kA-cTAq|XFG8Y4 z+7>aHR3wriZPmGe8UKXT$@UE7s1$-B3_wOYE>y!nnasvNX@_i7$^;9_baEl7l$9^% zflW9nRl=m692gVp53n|N#cCK?ebBQ~dL^ZtObz=^t*(C@0Rak`a){JI(ymls%hyTC z61I)VQY&X;P(Xn@En3rHxBx?xyAyIXZ9P@~%40>PUzZHi9Rh9t@(4)1+?D-AhGZn+ zA(VQ7Ec@wH7B>n$*h#Z0Q-c!(O=9gQox(1?M{+FXNxD<}A6r8AI1f;)NYqX|0ZROQ z3Ku&u3uk{jBs+g_M8HYDc9JQK_+yKTT$!++{3KtKUiQ5^fsPE}L8=S^0slr>Hz$C-B1+bNhN!;rSs;#7^I0@SU6 zGem#G26cS-d-o&+DhA}_&3gwB2+f$D{DJLLrAb=TO0Q=MUl0OpP)nw|Sz@V*o^h?8 zz*^WBtYK#JlWDf988VS_8iG!qk;@wrc|Zu}7truQ!S2G>LL8Aa6YlFC0l+25L^((Wy)LBm7)<~9Dpq<=nACbCZ-?`NI<()lUrsv1U{AK zIFp!WM*)YE;AR&Aos;-xCjkzVAZI)R*L{;-XD$K5lZ|IBfBr~mz2FB?K0{CUv{F#V zjj4h5A3t2wEw4^mXGuhVl5bIpjXUv#W>8Na)CKNNTC2J%#I(hd$KAl`gLPe3gAs?R zEChto2Jfs1%L~=uiyT+gs5?i)Fs=~RVLcT_eC8!#5ydWl`4<)8JboKh3h%p~8bkhz zcT@w}x)CDVf4ST@)M>(dZ7oKe=VK-&$!U*q70=_l5tZ<+>nJhpxnO?J1#@^VFypz+ z95X0AP4Ir&7??5Pg2f7idHQE#`ruvFXfWci6#1f-WcB4dJ{wU9@3xK-vz}Y0zg{fi zzMfu#KCCqvab!7O6UWc8%5xq+9#9MK#(^SeFP5)lf2peZ=1C6f1ohvD78A~6+ni`_ z&+}Q_*3$;=uv$K}t4uow+#T=^$oV9D7Q}=$$v&RJC8HWa^UOd8wA-R&$B&Z5_$PMJ zn*Pb1PDe=p-x0O<_tLk_6~b=*)1xaUX~q%M9df*lHR z*%FyAw8zxKdT>;c87~%b!KR?*^x#mX0q?byfP0RPbZMn9-w0}4A*|PWD!_ckzp~TU z&3UP(0>&@3P$*BBG9S!lI77+rzpOLf!Cj+Qf2_4(T2CEFR$vO8fdUiWVs~laSsc~U z1nwg)8`eGbTRPgOC}N)`a+?+4h8;FRH=qlTYBT24`CU1O1I@WDLL@4}rE=p*?ZECl zqR*gfmvHDjZZzUNqh5k_2dLcN$Sw5<9p#MH^&vFTgQ%a2^{dfb!Ox|7OfJ zwQLYSP-(D6^Y=^iqnjc^pT>s$Fs=^DHO^20q_-lRZLsSXX&88XGm7bab66pm$Hr7Z z`i##@^MLGU$zliffq861gQmk|?}U|;f2m{mi92MEgMeA2ZiX^@z1yAp0nU^$xoQFG zN`f+!6(K%&V)+|#<@6Li5RTwQv?bYyV`xX0R+e(dphIbccTy$al&f~LxoKz6MLQ$z zVXM;Ta!^|ryo-c%BTfots)qt&^-y4@9uhNL^Ejui3*JFOx)CR_^8zJrQUy{5fA6MJ zJfxpYx&_n=kVaRNeXeN_3>AT8ineGl-*mnLFC&<$3&ul3>86|%&*q}opo3yV?lIq2 zKBmuvtHArq1M#~VowYOSsGT7vtrK>bF~L+|TFm9G5v3;F)=^`~f2MnmhqamT7;x{g zE8KYh@rXJT&VubjvUE{mtO@Lhe~0i!DNxyZzEhBv(5Q?aJ4J)0&N2*q+SVjRY98r{ zc6y@0d?Os*JcaE}PS*zGr7`(nt}^BoI0pdQ%3soj$z}suuTIeelNGv>!5oue`e1`w zivmPf)0~TrYlLyuAsxzYQ@B}ALZF(_Q|{fX5xSRXs=g>2l`?_8dmCpie}n8Jg0yHG z0h2*l5*bDvJSj36s3`GqsD;*&FxhoHjWnkF&Niy6-@J2JlpJ`SSjGaqxH&d@)&p6oYK99aG!&C}WfBQaqEnEKo+OBQ4aU6-h=PUTw15CT8)4M=FECxtBac_X!<4iJ7 zf^5-_IY;wTZZ=dgBj!%po}=mAMbF=7541R3986)S_gPFN$IBhDA4z{%(;k+&bZag=d{h}#Pe<5o616={(y28o+*uD>M)`M84jaHMvtT1#yZe|OS)Gb#yx1;N*Bxk-aea4Qgducm`FwO ze2z0*#&t+uVZ*WWX3xUunx4dGX&sRn3LH#XNAMxL*YTto_EfbQz=x#Qv(%CaoWG}K zNRQB?f9}JND|oUCWaZ1olSObmH{=;!=^A-NYy6`wgD5FFMjl+sZk)YTK^{puaIY;N 
[... base85-encoded binary patch data omitted (not human-readable) ...]
diff --git a/latest/py-modindex.html b/latest/py-modindex.html
index e66f910e2a8..720e8f6a9dc 100644
--- a/latest/py-modindex.html
+++ b/latest/py-modindex.html
@@ -2461,7 +2461,7 @@

      Python Module Index

      Built with
      Sphinx using a theme provided by Read the Docs. - +

diff --git a/latest/search.html b/latest/search.html
index 971f5decec0..ab9b5289e8d 100644
--- a/latest/search.html
+++ b/latest/search.html
@@ -123,7 +123,7 @@
       Built with
       Sphinx using a theme provided by Read the Docs. - +

      diff --git a/latest/searchindex.js b/latest/searchindex.js index d8b0d9f08c8..50efe5f61e5 100644 --- a/latest/searchindex.js +++ b/latest/searchindex.js @@ -1 +1 @@ -Search.setIndex({"alltitles": {"2.0 API": [[501, "api"]], "2.X API User Guide": [[470, "x-api-user-guide"]], "2018 - 2020 (4)": [[545, "id5"]], "2021 (15)": [[545, "id4"]], "2022 (35)": [[545, "id3"]], "2023 (25)": [[545, "id2"]], "2024 (6)": [[545, "id1"]], "3.0 API": [[502, "api"]], "API Document Example": [[503, "api-document-example"]], "API List that Need to Implement": [[496, "api-list-that-need-to-implement"]], "API for TensorFlow": [[481, "api-for-tensorflow"]], "APIs": [[504, "apis"]], "AWQ": [[477, "awq"]], "Accuracy Aware Tuning": [[488, "accuracy-aware-tuning"], [546, "accuracy-aware-tuning"]], "Accuracy Criteria": [[554, "accuracy-criteria"]], "Accuracy-driven mixed precision": [[474, "accuracy-driven-mixed-precision"], [539, "accuracy-driven-mixed-precision"]], "Adaptor": [[495, "adaptor"], [498, "adaptor"]], "Adaptor Support Matrix": [[495, "adaptor-support-matrix"]], "Add quantize API according to tune_cfg": [[496, "add-quantize-api-according-to-tune-cfg"]], "Add query_fw_capability to Adaptor": [[496, "add-query-fw-capability-to-adaptor"]], "Additional Content": [[494, "additional-content"]], "Advanced Topics": [[470, "advanced-topics"]], "Algorithm: Auto-tuning of $\\alpha$.": [[552, "algorithm-auto-tuning-of-alpha"]], "Appendix": [[528, "appendix"]], "Architecture": [[485, "architecture"], [524, "architecture"], [533, "architecture"]], "Attributes": [[211, "attributes"], [281, "attributes"]], "Attribution": [[490, "attribution"]], "Auto": [[554, "auto"]], "Auto-tune the alpha for each layer/block": [[552, "auto-tune-the-alpha-for-each-layer-block"]], "Auto-tune the alpha for the entire model": [[552, "auto-tune-the-alpha-for-the-entire-model"]], "AutoRound": [[477, "autoround"]], "AutoTune": [[482, "autotune"]], "Autotune API": [[478, "autotune-api"]], "Backend and Device": [[481, "backend-and-device"]], "Background": [[495, "background"], [522, "background"]], "Basic": [[554, "basic"]], "Bayesian": [[554, "bayesian"]], "Benchmark": [[483, "benchmark"], [505, "benchmark"], [538, "benchmark"]], "Benchmark Support Matrix": [[520, "benchmark-support-matrix"]], "Benchmarking": [[520, "benchmarking"]], "Benefit of SigOpt Strategy": [[551, "benefit-of-sigopt-strategy"]], "Build Custom Dataloader with Python API": [[523, "build-custom-dataloader-with-python-api"]], "Build Custom Metric with Python API": [[537, "build-custom-metric-with-python-api"]], "Built-in transform/dataset/metric APIs": [[532, "built-in-transform-dataset-metric-apis"]], "Calculate the data range and generate quantized model": [[496, "calculate-the-data-range-and-generate-quantized-model"]], "Calibration Algorithms": [[521, "calibration-algorithms"]], "Calibration Algorithms in Quantization": [[521, "calibration-algorithms-in-quantization"]], "Capability": [[495, "capability"]], "Citation": [[535, "citation"]], "Classes": [[1, "classes"], [2, "classes"], [3, "classes"], [5, "classes"], [6, "classes"], [7, "classes"], [8, "classes"], [9, "classes"], [10, "classes"], [11, "classes"], [12, "classes"], [13, "classes"], [14, "classes"], [15, "classes"], [17, "classes"], [18, "classes"], [19, "classes"], [20, "classes"], [21, "classes"], [22, "classes"], [23, "classes"], [24, "classes"], [25, "classes"], [26, "classes"], [27, "classes"], [28, "classes"], [29, "classes"], [30, "classes"], [32, "classes"], [33, "classes"], [34, "classes"], [35, 
"classes"], [36, "classes"], [38, "classes"], [39, "classes"], [40, "classes"], [41, "classes"], [42, "classes"], [43, "classes"], [44, "classes"], [45, "classes"], [46, "classes"], [47, "classes"], [48, "classes"], [49, "classes"], [50, "classes"], [51, "classes"], [52, "classes"], [53, "classes"], [54, "classes"], [55, "classes"], [56, "classes"], [57, "classes"], [58, "classes"], [59, "classes"], [60, "classes"], [62, "classes"], [63, "classes"], [64, "classes"], [65, "classes"], [66, "classes"], [67, "classes"], [68, "classes"], [69, "classes"], [70, "classes"], [71, "classes"], [73, "classes"], [74, "classes"], [75, "classes"], [76, "classes"], [77, "classes"], [78, "classes"], [79, "classes"], [81, "classes"], [82, "classes"], [83, "classes"], [84, "classes"], [85, "classes"], [87, "classes"], [88, "classes"], [89, "classes"], [90, "classes"], [92, "classes"], [93, "classes"], [94, "classes"], [95, "classes"], [98, "classes"], [100, "classes"], [104, "classes"], [105, "classes"], [106, "classes"], [107, "classes"], [108, "classes"], [109, "classes"], [110, "classes"], [111, "classes"], [112, "classes"], [113, "classes"], [114, "classes"], [116, "classes"], [117, "classes"], [118, "classes"], [119, "classes"], [120, "classes"], [121, "classes"], [122, "classes"], [123, "classes"], [124, "classes"], [125, "classes"], [126, "classes"], [127, "classes"], [128, "classes"], [129, "classes"], [131, "classes"], [132, "classes"], [134, "classes"], [135, "classes"], [139, "classes"], [142, "classes"], [143, "classes"], [146, "classes"], [147, "classes"], [149, "classes"], [150, "classes"], [152, "classes"], [153, "classes"], [156, "classes"], [157, "classes"], [159, "classes"], [161, "classes"], [162, "classes"], [163, "classes"], [165, "classes"], [169, "classes"], [173, "classes"], [174, "classes"], [175, "classes"], [177, "classes"], [178, "classes"], [179, "classes"], [180, "classes"], [181, "classes"], [182, "classes"], [184, "classes"], [185, "classes"], [186, "classes"], [187, "classes"], [188, "classes"], [189, "classes"], [190, "classes"], [191, "classes"], [195, "classes"], [198, "classes"], [199, "classes"], [200, "classes"], [201, "classes"], [202, "classes"], [203, "classes"], [204, "classes"], [205, "classes"], [206, "classes"], [207, "classes"], [208, "classes"], [209, "classes"], [210, "classes"], [211, "classes"], [212, "classes"], [213, "classes"], [214, "classes"], [216, "classes"], [217, "classes"], [218, "classes"], [221, "classes"], [223, "classes"], [224, "classes"], [225, "classes"], [227, "classes"], [230, "classes"], [234, "classes"], [236, "classes"], [238, "classes"], [239, "classes"], [240, "classes"], [241, "classes"], [242, "classes"], [243, "classes"], [244, "classes"], [245, "classes"], [247, "classes"], [248, "classes"], [249, "classes"], [250, "classes"], [251, "classes"], [252, "classes"], [253, "classes"], [254, "classes"], [255, "classes"], [256, "classes"], [258, "classes"], [259, "classes"], [260, "classes"], [263, "classes"], [264, "classes"], [265, "classes"], [266, "classes"], [267, "classes"], [268, "classes"], [269, "classes"], [271, "classes"], [272, "classes"], [273, "classes"], [274, "classes"], [277, "classes"], [278, "classes"], [279, "classes"], [280, "classes"], [281, "classes"], [283, "classes"], [284, "classes"], [286, "classes"], [288, "classes"], [289, "classes"], [292, "classes"], [293, "classes"], [294, "classes"], [297, "classes"], [298, "classes"], [299, "classes"], [303, "classes"], [306, "classes"], [307, "classes"], [308, 
"classes"], [310, "classes"], [311, "classes"], [312, "classes"], [313, "classes"], [314, "classes"], [315, "classes"], [316, "classes"], [317, "classes"], [318, "classes"], [319, "classes"], [320, "classes"], [321, "classes"], [322, "classes"], [323, "classes"], [324, "classes"], [325, "classes"], [326, "classes"], [327, "classes"], [328, "classes"], [329, "classes"], [330, "classes"], [331, "classes"], [332, "classes"], [334, "classes"], [335, "classes"], [336, "classes"], [337, "classes"], [338, "classes"], [339, "classes"], [340, "classes"], [341, "classes"], [342, "classes"], [343, "classes"], [345, "classes"], [346, "classes"], [347, "classes"], [348, "classes"], [349, "classes"], [350, "classes"], [352, "classes"], [353, "classes"], [354, "classes"], [355, "classes"], [357, "classes"], [358, "classes"], [359, "classes"], [360, "classes"], [363, "classes"], [364, "classes"], [365, "classes"], [366, "classes"], [367, "classes"], [368, "classes"], [369, "classes"], [371, "classes"], [372, "classes"], [373, "classes"], [374, "classes"], [375, "classes"], [376, "classes"], [377, "classes"], [378, "classes"], [379, "classes"], [380, "classes"], [381, "classes"], [383, "classes"], [384, "classes"], [387, "classes"], [389, "classes"], [390, "classes"], [391, "classes"], [392, "classes"], [398, "classes"], [399, "classes"], [401, "classes"], [403, "classes"], [404, "classes"], [405, "classes"], [406, "classes"], [412, "classes"], [413, "classes"], [416, "classes"], [417, "classes"], [418, "classes"], [419, "classes"], [420, "classes"], [421, "classes"], [422, "classes"], [423, "classes"], [426, "classes"], [427, "classes"], [429, "classes"], [430, "classes"], [431, "classes"], [432, "classes"], [433, "classes"], [439, "classes"], [443, "classes"], [445, "classes"], [449, "classes"], [452, "classes"], [453, "classes"], [461, "classes"], [462, "classes"], [463, "classes"], [464, "classes"], [466, "classes"], [467, "classes"]], "Code Migration from Intel Neural Compressor 1.X to Intel Neural Compressor 2.X": [[538, "code-migration-from-intel-neural-compressor-1-x-to-intel-neural-compressor-2-x"]], "Comments": [[522, "comments"]], "Common Build Issues": [[529, "common-build-issues"]], "Common Problem": [[492, "common-problem"]], "Common Problems": [[478, "common-problems"]], "Common arguments": [[477, "common-arguments"]], "Communication": [[494, "communication"]], "Compression": [[506, "compression"]], "Config": [[507, "config"]], "Config Multiple Objectives": [[542, "config-multiple-objectives"]], "Config Single Objective": [[542, "config-single-objective"]], "Conservative Tuning": [[554, "conservative-tuning"]], "Contribution Guidelines": [[491, "contribution-guidelines"]], "Contributor Covenant Code of Conduct": [[490, "contributor-covenant-code-of-conduct"], [491, "contributor-covenant-code-of-conduct"]], "Create Pull Request": [[491, "create-pull-request"]], "Customize a New Tuning Strategy": [[554, "customize-a-new-tuning-strategy"]], "DataLoader": [[523, "dataloader"]], "Define the Quantization Ability of the Specific Operator": [[497, "define-the-quantization-ability-of-the-specific-operator"]], "Demo Usage": [[472, "demo-usage"]], "Demo usage": [[483, "demo-usage"]], "Design": [[485, "design"], [524, "design"], [554, "design"], [554, "id1"], [554, "id3"], [554, "id5"], [554, "id7"], [554, "id9"], [554, "id11"], [554, "id13"], [554, "id15"], [554, "id17"], [554, "id19"], [554, "id21"]], "Design the framework YAML": [[496, "design-the-framework-yaml"]], "Details": [[492, "details"]], 
"Determining the alpha through auto-tuning": [[480, "determining-the-alpha-through-auto-tuning"], [552, "determining-the-alpha-through-auto-tuning"]], "Distillation": [[538, "distillation"]], "Distillation for Quantization": [[525, "distillation-for-quantization"]], "Distillation for Quantization Support Matrix": [[525, "distillation-for-quantization-support-matrix"]], "Distributed Training and Inference (Evaluation)": [[526, "distributed-training-and-inference-evaluation"]], "Distributed Tuning": [[554, "distributed-tuning"]], "Documentation": [[494, "documentation"]], "Dump Throughput and Latency Summary": [[483, "dump-throughput-and-latency-summary"]], "During quantization mixed precision": [[539, "during-quantization-mixed-precision"]], "Dynamic Quantization": [[471, "dynamic-quantization"], [488, "dynamic-quantization"], [492, "dynamic-quantization"]], "Efficient Usage on Client-Side": [[477, "efficient-usage-on-client-side"]], "Enforcement": [[490, "enforcement"]], "Engineering": [[552, "engineering"]], "Example": [[537, "example"], [542, "example"]], "Example List": [[527, "example-list"]], "Example of Adding a New Backend Support": [[495, "example-of-adding-a-new-backend-support"]], "Examples": [[471, "examples"], [472, "examples"], [473, "examples"], [474, "examples"], [477, "examples"], [479, "examples"], [480, "examples"], [489, "examples"], [492, "examples"], [520, "examples"], [523, "examples"], [525, "examples"], [526, "examples"], [527, "examples"], [528, "examples"], [538, "examples"], [539, "examples"], [540, "examples"], [541, "examples"], [543, "examples"], [544, "examples"], [546, "examples"], [547, "examples"], [549, "examples"]], "Exceptions": [[138, "exceptions"], [397, "exceptions"]], "Exhaustive": [[554, "exhaustive"]], "Exit Policy": [[554, "exit-policy"]], "Export": [[528, "export"]], "Export Compressed Model": [[549, "export-compressed-model"]], "FP32 Model Export": [[528, "fp32-model-export"], [528, "id1"]], "FP8 Quantization": [[472, "fp8-quantization"], [494, "fp8-quantization"]], "FX": [[492, "fx"]], "FX Mode Support Matrix in Neural Compressor": [[492, "fx-mode-support-matrix-in-neural-compressor"]], "Feature Matrix": [[531, "feature-matrix"]], "Fixed Alpha": [[475, "fixed-alpha"]], "Folder structure": [[522, "folder-structure"]], "Framework YAML Configuration Files": [[530, "framework-yaml-configuration-files"]], "Frequently Asked Questions": [[529, "frequently-asked-questions"]], "Full Publications/Events (85)": [[545, "full-publications-events-85"]], "Functions": [[1, "functions"], [3, "functions"], [21, "functions"], [29, "functions"], [30, "functions"], [31, "functions"], [52, "functions"], [53, "functions"], [55, "functions"], [89, "functions"], [90, "functions"], [101, "functions"], [103, "functions"], [133, "functions"], [134, "functions"], [135, "functions"], [140, "functions"], [141, "functions"], [144, "functions"], [145, "functions"], [146, "functions"], [151, "functions"], [152, "functions"], [153, "functions"], [154, "functions"], [160, "functions"], [161, "functions"], [163, "functions"], [165, "functions"], [166, "functions"], [169, "functions"], [170, "functions"], [171, "functions"], [173, "functions"], [175, "functions"], [176, "functions"], [180, "functions"], [183, "functions"], [188, "functions"], [189, "functions"], [190, "functions"], [191, "functions"], [192, "functions"], [194, "functions"], [201, "functions"], [202, "functions"], [209, "functions"], [211, "functions"], [218, "functions"], [224, "functions"], [225, "functions"], 
[227, "functions"], [228, "functions"], [230, "functions"], [231, "functions"], [232, "functions"], [234, "functions"], [235, "functions"], [239, "functions"], [243, "functions"], [245, "functions"], [257, "functions"], [261, "functions"], [262, "functions"], [266, "functions"], [274, "functions"], [278, "functions"], [280, "functions"], [281, "functions"], [292, "functions"], [293, "functions"], [294, "functions"], [297, "functions"], [298, "functions"], [299, "functions"], [301, "functions"], [302, "functions"], [303, "functions"], [305, "functions"], [324, "functions"], [325, "functions"], [327, "functions"], [385, "functions"], [387, "functions"], [390, "functions"], [391, "functions"], [396, "functions"], [398, "functions"], [404, "functions"], [406, "functions"], [408, "functions"], [409, "functions"], [411, "functions"], [412, "functions"], [413, "functions"], [415, "functions"], [417, "functions"], [418, "functions"], [420, "functions"], [425, "functions"], [427, "functions"], [431, "functions"], [433, "functions"], [435, "functions"], [437, "functions"], [438, "functions"], [439, "functions"], [441, "functions"], [442, "functions"], [443, "functions"], [444, "functions"], [446, "functions"], [448, "functions"], [449, "functions"], [455, "functions"], [457, "functions"], [458, "functions"], [459, "functions"], [462, "functions"], [463, "functions"], [465, "functions"], [466, "functions"]], "GPTQ": [[477, "gptq"]], "General Use Cases": [[483, "general-use-cases"]], "Get Start with FP8 Quantization": [[472, "get-start-with-fp8-quantization"]], "Get Started": [[476, "get-started"], [477, "get-started"], [479, "get-started"], [484, "get-started"], [492, "get-started"], [546, "get-started"]], "Get Started with Adaptor API": [[495, "get-started-with-adaptor-api"]], "Get Started with Benchmark API": [[520, "get-started-with-benchmark-api"]], "Get Started with DataLoader": [[523, "get-started-with-dataloader"]], "Get Started with Distillation for Quantization API": [[525, "get-started-with-distillation-for-quantization-api"]], "Get Started with Distributed Training and Inference API": [[526, "get-started-with-distributed-training-and-inference-api"]], "Get Started with Metric": [[537, "get-started-with-metric"]], "Get Started with Microscaling Quantization API": [[473, "get-started-with-microscaling-quantization-api"], [541, "get-started-with-microscaling-quantization-api"]], "Get Started with Mixed Precision API": [[539, "get-started-with-mixed-precision-api"]], "Get Started with Objective API": [[542, "get-started-with-objective-api"]], "Get Started with Orchestration API": [[543, "get-started-with-orchestration-api"]], "Get Started with Pruning API": [[544, "get-started-with-pruning-api"]], "Get Started with autotune API": [[474, "get-started-with-autotune-api"]], "Get started with Framework YAML Files": [[530, "get-started-with-framework-yaml-files"]], "Getting Started": [[494, "getting-started"], [531, "getting-started"]], "Getting Started with Dynamic Quantization": [[471, "getting-started-with-dynamic-quantization"]], "HAWQ_V2": [[554, "hawq-v2"]], "HQQ": [[477, "hqq"]], "Hardware and Software requests for BF16": [[474, "hardware-and-software-requests-for-bf16"], [539, "hardware-and-software-requests-for-bf16"]], "Hardware and Software requests for FP16": [[474, "hardware-and-software-requests-for-fp16"], [539, "hardware-and-software-requests-for-fp16"]], "Horovodrun Execution": [[526, "horovodrun-execution"]], "How it Works": [[482, "how-it-works"]], "How to Add An Adaptor": [[496, 
"how-to-add-an-adaptor"]], "How to Support New Data Type, Like Int4, with a Few Line Changes": [[497, "how-to-support-new-data-type-like-int4-with-a-few-line-changes"]], "INC Coding Conventions": [[522, "inc-coding-conventions"]], "INT8 Model Export": [[528, "int8-model-export"], [528, "id2"]], "Implement ONNXRTAdaptor Class": [[495, "implement-onnxrtadaptor-class"]], "Imports": [[522, "imports"]], "Incompatible Changes": [[550, "incompatible-changes"]], "Incompatible changes between v1.2 and v1.1": [[532, "incompatible-changes-between-v1-2-and-v1-1"]], "Infrastructure of Intel\u00ae Neural Compressor": [[533, "infrastructure-of-intel-neural-compressor"]], "Install Framework": [[494, "install-framework"], [534, "install-framework"]], "Install from AI Kit": [[534, "install-from-ai-kit"]], "Install from Binary": [[534, "install-from-binary"]], "Install from Source": [[534, "install-from-source"]], "Install from pypi": [[494, "install-from-pypi"]], "Install tensorflow": [[494, "install-tensorflow"], [534, "install-tensorflow"]], "Install torch for CPU": [[494, "install-torch-for-cpu"], [534, "install-torch-for-cpu"]], "Install torch for other platform": [[494, "install-torch-for-other-platform"], [534, "install-torch-for-other-platform"]], "Install torch/intel_extension_for_pytorch for Intel GPU": [[494, "install-torch-intel-extension-for-pytorch-for-intel-gpu"], [534, "install-torch-intel-extension-for-pytorch-for-intel-gpu"]], "Installation": [[494, "installation"], [534, "installation"], [534, "id1"]], "Intel\u00ae Neural Compressor": [[494, "intel-neural-compressor"]], "Intel\u00ae Neural Compressor Documentation": [[469, "intel-neural-compressor-documentation"], [556, "intel-neural-compressor-documentation"]], "Intel\u00ae Neural Compressor quantized ONNX models support multiple hardware vendors through ONNX Runtime:": [[534, "intel-neural-compressor-quantized-onnx-models-support-multiple-hardware-vendors-through-onnx-runtime"]], "Intel\u00ae Neural Compressor supports CPUs based on Intel 64 architecture or compatible processors:": [[534, "intel-neural-compressor-supports-cpus-based-on-intel-64-architecture-or-compatible-processors"]], "Intel\u00ae Neural Compressor supports GPUs built on Intel\u2019s Xe architecture:": [[534, "intel-neural-compressor-supports-gpus-built-on-intel-s-xe-architecture"]], "Intel\u00ae Neural Compressor supports HPUs based on heterogeneous architecture with two compute engines (MME and TPC):": [[534, "intel-neural-compressor-supports-hpus-based-on-heterogeneous-architecture-with-two-compute-engines-mme-and-tpc"]], "Introduction": [[471, "introduction"], [472, "introduction"], [473, "introduction"], [474, "introduction"], [475, "introduction"], [476, "introduction"], [477, "introduction"], [478, "introduction"], [479, "introduction"], [480, "introduction"], [481, "introduction"], [483, "introduction"], [484, "introduction"], [488, "introduction"], [489, "introduction"], [492, "introduction"], [495, "introduction"], [496, "introduction"], [497, "introduction"], [520, "introduction"], [521, "introduction"], [523, "introduction"], [525, "introduction"], [526, "introduction"], [528, "introduction"], [530, "introduction"], [533, "introduction"], [537, "introduction"], [539, "introduction"], [540, "introduction"], [541, "introduction"], [542, "introduction"], [543, "introduction"], [544, "introduction"], [547, "introduction"], [549, "introduction"], [551, "introduction"], [552, "introduction"], [553, "introduction"], [554, "introduction"]], "Invoke the Operator 
Kernel According to the Tuning Configuration": [[497, "invoke-the-operator-kernel-according-to-the-tuning-configuration"]], "Issue 1:": [[529, "issue-1"]], "Issue 2:": [[529, "issue-2"]], "Issue 3:": [[529, "issue-3"]], "Issue 4:": [[529, "issue-4"]], "Issue 5:": [[529, "issue-5"]], "Keras Models with keras 2.15.1": [[555, "keras-models-with-keras-2-15-1"]], "Known Issues": [[550, "known-issues"]], "LLMs Quantization Recipes": [[536, "llms-quantization-recipes"]], "Large Language Model Pruning": [[544, "large-language-model-pruning"]], "Large Language Models Accuracy": [[536, "large-language-models-accuracy"]], "Large Language Models Recipes": [[536, "large-language-models-recipes"]], "Layer Wise Quantization": [[477, "layer-wise-quantization"]], "Layer Wise Quantization (LWQ)": [[547, "layer-wise-quantization-lwq"]], "Legal Information": [[535, "legal-information"]], "License": [[535, "license"]], "Load API": [[478, "load-api"]], "Logger": [[522, "logger"]], "MSE": [[554, "mse"]], "MSE_V2": [[554, "mse-v2"]], "MXNet": [[537, "mxnet"], [553, "mxnet"]], "Matmul quantization example": [[488, "matmul-quantization-example"], [552, "matmul-quantization-example"]], "Metrics": [[537, "metrics"]], "Microscaling Quantization": [[473, "microscaling-quantization"], [541, "microscaling-quantization"]], "Mix Precision": [[508, "mix-precision"], [538, "mix-precision"]], "Mixed Precision": [[481, "mixed-precision"], [539, "mixed-precision"]], "Mixed Precision Support Matrix": [[474, "mixed-precision-support-matrix"], [539, "mixed-precision-support-matrix"]], "Model": [[509, "model"], [540, "model"]], "Model Examples": [[476, "model-examples"]], "Model Examples with PT2E": [[476, "model-examples-with-pt2e"]], "Model Quantization": [[538, "model-quantization"]], "Module Contents": [[1, "module-contents"], [2, "module-contents"], [3, "module-contents"], [5, "module-contents"], [6, "module-contents"], [7, "module-contents"], [8, "module-contents"], [9, "module-contents"], [10, "module-contents"], [11, "module-contents"], [12, "module-contents"], [13, "module-contents"], [14, "module-contents"], [15, "module-contents"], [17, "module-contents"], [18, "module-contents"], [19, "module-contents"], [20, "module-contents"], [21, "module-contents"], [22, "module-contents"], [23, "module-contents"], [24, "module-contents"], [25, "module-contents"], [26, "module-contents"], [27, "module-contents"], [28, "module-contents"], [29, "module-contents"], [30, "module-contents"], [31, "module-contents"], [32, "module-contents"], [33, "module-contents"], [34, "module-contents"], [35, "module-contents"], [36, "module-contents"], [38, "module-contents"], [39, "module-contents"], [40, "module-contents"], [41, "module-contents"], [42, "module-contents"], [43, "module-contents"], [44, "module-contents"], [45, "module-contents"], [46, "module-contents"], [47, "module-contents"], [48, "module-contents"], [49, "module-contents"], [50, "module-contents"], [51, "module-contents"], [52, "module-contents"], [53, "module-contents"], [54, "module-contents"], [55, "module-contents"], [56, "module-contents"], [57, "module-contents"], [58, "module-contents"], [59, "module-contents"], [60, "module-contents"], [62, "module-contents"], [63, "module-contents"], [64, "module-contents"], [65, "module-contents"], [66, "module-contents"], [67, "module-contents"], [68, "module-contents"], [69, "module-contents"], [70, "module-contents"], [71, "module-contents"], [73, "module-contents"], [74, "module-contents"], [75, "module-contents"], [76, 
"module-contents"], [77, "module-contents"], [78, "module-contents"], [79, "module-contents"], [81, "module-contents"], [82, "module-contents"], [83, "module-contents"], [84, "module-contents"], [85, "module-contents"], [87, "module-contents"], [88, "module-contents"], [89, "module-contents"], [90, "module-contents"], [92, "module-contents"], [93, "module-contents"], [94, "module-contents"], [95, "module-contents"], [98, "module-contents"], [100, "module-contents"], [101, "module-contents"], [103, "module-contents"], [104, "module-contents"], [105, "module-contents"], [106, "module-contents"], [107, "module-contents"], [108, "module-contents"], [109, "module-contents"], [110, "module-contents"], [111, "module-contents"], [112, "module-contents"], [113, "module-contents"], [114, "module-contents"], [116, "module-contents"], [117, "module-contents"], [118, "module-contents"], [119, "module-contents"], [120, "module-contents"], [121, "module-contents"], [122, "module-contents"], [123, "module-contents"], [124, "module-contents"], [125, "module-contents"], [126, "module-contents"], [127, "module-contents"], [128, "module-contents"], [129, "module-contents"], [131, "module-contents"], [132, "module-contents"], [133, "module-contents"], [134, "module-contents"], [135, "module-contents"], [138, "module-contents"], [139, "module-contents"], [140, "module-contents"], [141, "module-contents"], [142, "module-contents"], [143, "module-contents"], [144, "module-contents"], [145, "module-contents"], [146, "module-contents"], [147, "module-contents"], [149, "module-contents"], [150, "module-contents"], [151, "module-contents"], [152, "module-contents"], [153, "module-contents"], [154, "module-contents"], [156, "module-contents"], [157, "module-contents"], [159, "module-contents"], [160, "module-contents"], [161, "module-contents"], [162, "module-contents"], [163, "module-contents"], [165, "module-contents"], [166, "module-contents"], [169, "module-contents"], [171, "module-contents"], [173, "module-contents"], [174, "module-contents"], [175, "module-contents"], [177, "module-contents"], [178, "module-contents"], [179, "module-contents"], [180, "module-contents"], [181, "module-contents"], [182, "module-contents"], [184, "module-contents"], [185, "module-contents"], [186, "module-contents"], [187, "module-contents"], [188, "module-contents"], [189, "module-contents"], [190, "module-contents"], [191, "module-contents"], [192, "module-contents"], [194, "module-contents"], [195, "module-contents"], [198, "module-contents"], [199, "module-contents"], [200, "module-contents"], [201, "module-contents"], [202, "module-contents"], [203, "module-contents"], [204, "module-contents"], [205, "module-contents"], [206, "module-contents"], [207, "module-contents"], [208, "module-contents"], [209, "module-contents"], [210, "module-contents"], [211, "module-contents"], [212, "module-contents"], [213, "module-contents"], [214, "module-contents"], [216, "module-contents"], [217, "module-contents"], [218, "module-contents"], [221, "module-contents"], [223, "module-contents"], [224, "module-contents"], [225, "module-contents"], [227, "module-contents"], [228, "module-contents"], [230, "module-contents"], [231, "module-contents"], [232, "module-contents"], [234, "module-contents"], [235, "module-contents"], [236, "module-contents"], [238, "module-contents"], [239, "module-contents"], [240, "module-contents"], [241, "module-contents"], [242, "module-contents"], [243, "module-contents"], [244, "module-contents"], [245, 
"module-contents"], [247, "module-contents"], [248, "module-contents"], [249, "module-contents"], [250, "module-contents"], [251, "module-contents"], [252, "module-contents"], [253, "module-contents"], [254, "module-contents"], [255, "module-contents"], [256, "module-contents"], [257, "module-contents"], [258, "module-contents"], [259, "module-contents"], [260, "module-contents"], [261, "module-contents"], [262, "module-contents"], [263, "module-contents"], [264, "module-contents"], [265, "module-contents"], [266, "module-contents"], [267, "module-contents"], [268, "module-contents"], [269, "module-contents"], [271, "module-contents"], [272, "module-contents"], [273, "module-contents"], [274, "module-contents"], [277, "module-contents"], [278, "module-contents"], [279, "module-contents"], [280, "module-contents"], [281, "module-contents"], [283, "module-contents"], [284, "module-contents"], [286, "module-contents"], [288, "module-contents"], [289, "module-contents"], [292, "module-contents"], [293, "module-contents"], [294, "module-contents"], [297, "module-contents"], [298, "module-contents"], [299, "module-contents"], [301, "module-contents"], [302, "module-contents"], [303, "module-contents"], [305, "module-contents"], [306, "module-contents"], [307, "module-contents"], [308, "module-contents"], [310, "module-contents"], [311, "module-contents"], [312, "module-contents"], [313, "module-contents"], [314, "module-contents"], [315, "module-contents"], [316, "module-contents"], [317, "module-contents"], [318, "module-contents"], [319, "module-contents"], [320, "module-contents"], [321, "module-contents"], [322, "module-contents"], [323, "module-contents"], [324, "module-contents"], [325, "module-contents"], [326, "module-contents"], [327, "module-contents"], [328, "module-contents"], [329, "module-contents"], [330, "module-contents"], [331, "module-contents"], [332, "module-contents"], [334, "module-contents"], [335, "module-contents"], [336, "module-contents"], [337, "module-contents"], [338, "module-contents"], [339, "module-contents"], [340, "module-contents"], [341, "module-contents"], [342, "module-contents"], [343, "module-contents"], [345, "module-contents"], [346, "module-contents"], [347, "module-contents"], [348, "module-contents"], [349, "module-contents"], [350, "module-contents"], [352, "module-contents"], [353, "module-contents"], [354, "module-contents"], [355, "module-contents"], [357, "module-contents"], [358, "module-contents"], [359, "module-contents"], [360, "module-contents"], [363, "module-contents"], [364, "module-contents"], [365, "module-contents"], [366, "module-contents"], [367, "module-contents"], [368, "module-contents"], [369, "module-contents"], [371, "module-contents"], [372, "module-contents"], [373, "module-contents"], [374, "module-contents"], [375, "module-contents"], [376, "module-contents"], [377, "module-contents"], [378, "module-contents"], [379, "module-contents"], [380, "module-contents"], [381, "module-contents"], [383, "module-contents"], [384, "module-contents"], [385, "module-contents"], [387, "module-contents"], [389, "module-contents"], [390, "module-contents"], [391, "module-contents"], [392, "module-contents"], [396, "module-contents"], [397, "module-contents"], [398, "module-contents"], [399, "module-contents"], [401, "module-contents"], [403, "module-contents"], [404, "module-contents"], [405, "module-contents"], [406, "module-contents"], [408, "module-contents"], [409, "module-contents"], [411, "module-contents"], [412, 
"module-contents"], [413, "module-contents"], [415, "module-contents"], [416, "module-contents"], [417, "module-contents"], [418, "module-contents"], [419, "module-contents"], [420, "module-contents"], [421, "module-contents"], [422, "module-contents"], [423, "module-contents"], [425, "module-contents"], [426, "module-contents"], [427, "module-contents"], [429, "module-contents"], [430, "module-contents"], [431, "module-contents"], [432, "module-contents"], [433, "module-contents"], [435, "module-contents"], [437, "module-contents"], [438, "module-contents"], [439, "module-contents"], [441, "module-contents"], [442, "module-contents"], [443, "module-contents"], [444, "module-contents"], [445, "module-contents"], [446, "module-contents"], [448, "module-contents"], [449, "module-contents"], [452, "module-contents"], [453, "module-contents"], [455, "module-contents"], [457, "module-contents"], [458, "module-contents"], [459, "module-contents"], [461, "module-contents"], [462, "module-contents"], [463, "module-contents"], [464, "module-contents"], [465, "module-contents"], [466, "module-contents"], [467, "module-contents"]], "Multiple Objectives": [[542, "multiple-objectives"]], "Neural Compressor Configuration": [[551, "neural-compressor-configuration"]], "Neural Network Pruning": [[544, "neural-network-pruning"]], "Note": [[492, "note"]], "ONNX Models with ONNX Runtime 1.18.1": [[555, "onnx-models-with-onnx-runtime-1-18-1"]], "ONNX Runtime": [[499, "onnx-runtime"]], "ONNX Runtime framework example": [[547, "onnx-runtime-framework-example"]], "ONNXRT": [[537, "onnxrt"], [553, "onnxrt"]], "Objective": [[510, "objective"], [542, "objective"]], "Objective Support Matrix": [[542, "objective-support-matrix"]], "One-shot": [[543, "one-shot"]], "Optimization Orchestration": [[543, "optimization-orchestration"]], "Option 1: Pure Yaml Configuration": [[526, "option-1-pure-yaml-configuration"]], "Option 2: User Defined Training Function": [[526, "option-2-user-defined-training-function"]], "Orchestration": [[538, "orchestration"]], "Orchestration Support Matrix": [[543, "orchestration-support-matrix"]], "Our Pledge": [[490, "our-pledge"]], "Our Responsibilities": [[490, "our-responsibilities"]], "Our Standards": [[490, "our-standards"]], "Our enhancement:": [[552, "our-enhancement"]], "Overview": [[470, "overview"], [482, "overview"]], "Package Contents": [[170, "package-contents"], [176, "package-contents"], [183, "package-contents"]], "Per-channel example": [[488, "per-channel-example"], [552, "per-channel-example"]], "Per-channel limitation": [[488, "per-channel-limitation"], [552, "per-channel-limitation"]], "Per-tensor & Per-channel": [[488, "per-tensor-per-channel"], [552, "per-tensor-per-channel"]], "Per-tensor example": [[488, "per-tensor-example"], [552, "per-tensor-example"]], "Performance": [[551, "performance"]], "Performance Comparison of Different Strategies": [[551, "performance-comparison-of-different-strategies"]], "Post Training Dynamic Quantization": [[492, "post-training-dynamic-quantization"], [546, "post-training-dynamic-quantization"]], "Post Training Quantization": [[546, "post-training-quantization"]], "Post Training Static Quantization": [[481, "post-training-static-quantization"], [492, "post-training-static-quantization"], [546, "post-training-static-quantization"]], "Post-training Quantization": [[538, "post-training-quantization"]], "Preparation": [[551, "preparation"]], "Prepare Dependency Packages": [[489, "prepare-dependency-packages"]], "Prepare calibration model from 
fp32 graph": [[496, "prepare-calibration-model-from-fp32-graph"]], "Prerequisites": [[534, "prerequisites"]], "Pruning": [[538, "pruning"], [544, "pruning"]], "Pruning Criteria": [[544, "pruning-criteria"]], "Pruning Patterns": [[544, "pruning-patterns"]], "Pruning Schedules": [[544, "pruning-schedules"]], "Pruning Scope": [[544, "pruning-scope"]], "Pruning Support Matrix": [[544, "pruning-support-matrix"]], "Pruning Types": [[544, "pruning-types"]], "Pruning with Hyperparameter Optimization": [[544, "pruning-with-hyperparameter-optimization"]], "Public and Internal Interfaces": [[522, "public-and-internal-interfaces"]], "Pull Request Acceptance Criteria": [[491, "pull-request-acceptance-criteria"]], "Pull Request Checklist": [[491, "pull-request-checklist"]], "Pull Request Status Checks Overview": [[491, "pull-request-status-checks-overview"]], "Pull Request Template": [[491, "pull-request-template"]], "PyTorch": [[537, "pytorch"], [548, "pytorch"]], "PyTorch Examples:": [[526, "pytorch-examples"]], "PyTorch Mixed Precision": [[474, "pytorch-mixed-precision"]], "PyTorch Model": [[528, "pytorch-model"]], "PyTorch Models with Torch 2.3.0+cpu in IPEX Mode": [[555, "pytorch-models-with-torch-2-3-0-cpu-in-ipex-mode"]], "PyTorch Models with Torch 2.3.0+cpu in PTQ Mode": [[555, "pytorch-models-with-torch-2-3-0-cpu-in-ptq-mode"]], "PyTorch Models with Torch 2.3.0+cpu in QAT Mode": [[555, "pytorch-models-with-torch-2-3-0-cpu-in-qat-mode"]], "PyTorch Smooth Quantization": [[475, "pytorch-smooth-quantization"]], "PyTorch Static Quantization": [[476, "pytorch-static-quantization"]], "PyTorch Weight Only Quantization": [[477, "pytorch-weight-only-quantization"]], "PyTorch framework example": [[547, "pytorch-framework-example"]], "Python-based APIs": [[470, "python-based-apis"]], "Pytorch": [[553, "pytorch"]], "Pytorch Quantization AutoTune": [[516, "pytorch-quantization-autotune"]], "Pytorch Quantization Base API": [[517, "pytorch-quantization-base-api"]], "Pytorch Quantization Config": [[518, "pytorch-quantization-config"]], "Quantization": [[488, "quantization"], [511, "quantization"], [546, "quantization"]], "Quantization APIs": [[478, "quantization-apis"]], "Quantization Approaches": [[481, "quantization-approaches"], [546, "quantization-approaches"]], "Quantization Aware Training": [[488, "quantization-aware-training"], [538, "quantization-aware-training"], [546, "quantization-aware-training"], [546, "id1"]], "Quantization Capability": [[549, "quantization-capability"]], "Quantization Fundamentals": [[488, "quantization-fundamentals"], [546, "quantization-fundamentals"], [552, "quantization-fundamentals"]], "Quantization Introduction": [[546, "quantization-introduction"]], "Quantization Scheme": [[481, "quantization-scheme"]], "Quantization Scheme in IPEX": [[488, "quantization-scheme-in-ipex"], [546, "quantization-scheme-in-ipex"]], "Quantization Scheme in MXNet": [[546, "quantization-scheme-in-mxnet"]], "Quantization Scheme in ONNX Runtime": [[546, "quantization-scheme-in-onnx-runtime"]], "Quantization Scheme in PyTorch": [[488, "quantization-scheme-in-pytorch"], [546, "quantization-scheme-in-pytorch"]], "Quantization Scheme in TensorFlow": [[488, "quantization-scheme-in-tensorflow"], [546, "quantization-scheme-in-tensorflow"]], "Quantization Support Matrix": [[546, "quantization-support-matrix"]], "Quantization on Client": [[484, "quantization-on-client"]], "Quantization-Aware Training": [[492, "quantization-aware-training"]], "Query API": [[495, "query-api"]], "Query API Introduction": [[495, 
"query-api-introduction"]], "Quick Samples": [[531, "quick-samples"]], "RTN": [[477, "rtn"]], "Random": [[554, "random"]], "Recommend VS Code settings.json": [[522, "recommend-vs-code-settings-json"]], "Reference": [[473, "reference"], [477, "reference"], [488, "reference"], [521, "reference"], [522, "reference"], [541, "reference"], [544, "reference"], [546, "reference"], [549, "reference"], [552, "reference"]], "Regularization": [[544, "regularization"]], "Release": [[550, "release"]], "Release Data": [[527, "release-data"]], "Release Notes": [[550, "release-notes"]], "Report a Vulnerability": [[493, "report-a-vulnerability"]], "Retrain-free Pruning API": [[544, "retrain-free-pruning-api"]], "Rules": [[522, "rules"]], "Run sampling iterations of the fp32 graph to calibrate quantizable operators.": [[496, "run-sampling-iterations-of-the-fp32-graph-to-calibrate-quantizable-operators"]], "Saving and Loading": [[477, "saving-and-loading"]], "Scope": [[490, "scope"]], "Sections": [[469, "sections"], [556, "sections"]], "Security": [[526, "security"]], "Security Policy": [[493, "security-policy"]], "Selected Publications/Events": [[494, "selected-publications-events"]], "SigOpt": [[554, "sigopt"]], "SigOpt Platform": [[551, "sigopt-platform"]], "SigOpt Strategy": [[551, "sigopt-strategy"]], "Single Objective": [[542, "single-objective"]], "Smooth Quant": [[480, "smooth-quant"], [552, "smooth-quant"]], "Smooth Quantization": [[481, "smooth-quantization"], [488, "smooth-quantization"]], "SmoothQuant": [[552, "smoothquant"]], "SmoothQuant and Our Enhancement": [[552, "smoothquant-and-our-enhancement"]], "Sparse Model Deployment": [[544, "sparse-model-deployment"]], "Sparsity Decay Types": [[544, "sparsity-decay-types"]], "Specify Quantization Backend and Device": [[546, "specify-quantization-backend-and-device"]], "Specify Quantization Recipes": [[546, "specify-quantization-recipes"]], "Specify Quantization Rules": [[475, "specify-quantization-rules"], [476, "specify-quantization-rules"], [477, "specify-quantization-rules"], [479, "specify-quantization-rules"], [546, "specify-quantization-rules"]], "Static Quantization": [[488, "static-quantization"]], "Static Quantization & Quantization Aware Training": [[492, "static-quantization-quantization-aware-training"]], "Static Quantization with IPEX Backend": [[476, "static-quantization-with-ipex-backend"]], "Static Quantization with PT2E Backend": [[476, "static-quantization-with-pt2e-backend"]], "Step-by-Step guidelines": [[491, "step-by-step-guidelines"]], "Strategy": [[512, "strategy"]], "Strategy Design": [[554, "strategy-design"]], "Strings": [[522, "strings"]], "Submodules": [[0, "submodules"], [4, "submodules"], [16, "submodules"], [37, "submodules"], [61, "submodules"], [72, "submodules"], [80, "submodules"], [86, "submodules"], [91, "submodules"], [96, "submodules"], [97, "submodules"], [99, "submodules"], [102, "submodules"], [115, "submodules"], [130, "submodules"], [136, "submodules"], [137, "submodules"], [148, "submodules"], [155, "submodules"], [158, "submodules"], [164, "submodules"], [167, "submodules"], [170, "submodules"], [172, "submodules"], [176, "submodules"], [183, "submodules"], [193, "submodules"], [197, "submodules"], [215, "submodules"], [219, "submodules"], [222, "submodules"], [226, "submodules"], [233, "submodules"], [237, "submodules"], [270, "submodules"], [276, "submodules"], [285, "submodules"], [287, "submodules"], [295, "submodules"], [300, "submodules"], [304, "submodules"], [309, "submodules"], [333, 
"submodules"], [344, "submodules"], [351, "submodules"], [356, "submodules"], [361, "submodules"], [362, "submodules"], [370, "submodules"], [382, "submodules"], [388, "submodules"], [394, "submodules"], [395, "submodules"], [400, "submodules"], [402, "submodules"], [407, "submodules"], [410, "submodules"], [414, "submodules"], [424, "submodules"], [428, "submodules"], [434, "submodules"], [440, "submodules"], [447, "submodules"], [451, "submodules"], [456, "submodules"], [460, "submodules"]], "Subpackages": [[4, "subpackages"], [72, "subpackages"], [96, "subpackages"], [97, "subpackages"], [99, "subpackages"], [136, "subpackages"], [155, "subpackages"], [170, "subpackages"], [196, "subpackages"], [220, "subpackages"], [226, "subpackages"], [270, "subpackages"], [282, "subpackages"], [290, "subpackages"], [291, "subpackages"], [304, "subpackages"], [344, "subpackages"], [361, "subpackages"], [362, "subpackages"], [394, "subpackages"], [428, "subpackages"], [436, "subpackages"], [460, "subpackages"]], "Summary": [[497, "summary"]], "Support": [[491, "support"]], "Support Matrix": [[481, "support-matrix"], [521, "support-matrix"]], "Supported Algorithms": [[489, "supported-algorithms"]], "Supported Built-in Metric Matrix": [[537, "supported-built-in-metric-matrix"]], "Supported Feature Matrix": [[526, "supported-feature-matrix"], [530, "supported-feature-matrix"], [533, "supported-feature-matrix"], [546, "supported-feature-matrix"]], "Supported Framework Dataloader Matrix": [[523, "supported-framework-dataloader-matrix"]], "Supported Framework Matrix": [[475, "supported-framework-matrix"], [552, "supported-framework-matrix"]], "Supported Framework Model Matrix": [[528, "supported-framework-model-matrix"], [540, "supported-framework-model-matrix"], [541, "supported-framework-model-matrix"], [547, "supported-framework-model-matrix"], [549, "supported-framework-model-matrix"]], "Supported Matrix": [[477, "supported-matrix"], [478, "supported-matrix"], [483, "supported-matrix"]], "Supported Parameters": [[472, "supported-parameters"]], "Supported quantized ops": [[528, "supported-quantized-ops"]], "Symmetric & Asymmetric": [[488, "symmetric-asymmetric"]], "System Requirements": [[534, "system-requirements"]], "TEQ": [[477, "teq"]], "TODO Comments": [[522, "todo-comments"]], "TPE": [[554, "tpe"]], "TensorFlow": [[481, "tensorflow"], [537, "tensorflow"], [553, "tensorflow"]], "TensorFlow Examples:": [[526, "tensorflow-examples"]], "TensorFlow Models with TensorFlow 2.16.1": [[555, "tensorflow-models-with-tensorflow-2-16-1"]], "TensorFlow Quantization": [[479, "tensorflow-quantization"]], "Tensorflow": [[548, "tensorflow"]], "Tensorflow Model": [[528, "tensorflow-model"]], "Tensorflow Quantization AutoTune": [[513, "tensorflow-quantization-autotune"]], "Tensorflow Quantization Base API": [[514, "tensorflow-quantization-base-api"]], "Tensorflow Quantization Config": [[515, "tensorflow-quantization-config"]], "Torch": [[478, "torch"]], "Torch Utils": [[500, "torch-utils"]], "Torch-like APIs": [[478, "torch-like-apis"]], "Trademarks": [[535, "trademarks"]], "Training": [[519, "training"]], "Training-aware pruning API": [[544, "training-aware-pruning-api"]], "Transform": [[553, "transform"]], "Transform Support List": [[553, "transform-support-list"]], "Transformers-like API": [[489, "transformers-like-api"]], "Tuning Algorithms": [[554, "tuning-algorithms"]], "Tuning Process": [[554, "tuning-process"]], "Tuning Space": [[554, "tuning-space"]], "Tuning Strategies": [[554, "tuning-strategies"]], "Turn 
OFF Auto Mixed Precision during Quantization": [[548, "turn-off-auto-mixed-precision-during-quantization"]], "Type Annotations": [[522, "type-annotations"]], "Usage": [[475, "usage"], [477, "usage"], [480, "usage"], [483, "usage"], [552, "usage"], [554, "usage"], [554, "id2"], [554, "id4"], [554, "id6"], [554, "id8"], [554, "id10"], [554, "id12"], [554, "id14"], [554, "id16"], [554, "id18"], [554, "id20"], [554, "id22"]], "Usage For CPU": [[489, "usage-for-cpu"]], "Usage For Intel GPU": [[489, "usage-for-intel-gpu"]], "Usage Sample with IPEX": [[476, "usage-sample-with-ipex"]], "Usage Sample with PT2E": [[476, "usage-sample-with-pt2e"]], "Usage examples for CPU device": [[489, "usage-examples-for-cpu-device"]], "Use Docker Image with torch installed for HPU": [[494, "use-docker-image-with-torch-installed-for-hpu"], [534, "use-docker-image-with-torch-installed-for-hpu"]], "Use Intel\u00ae Neural Compressor DataLoader API": [[523, "use-intel-neural-compressor-dataloader-api"]], "Use Intel\u00ae Neural Compressor Metric API": [[537, "use-intel-neural-compressor-metric-api"]], "Use the New Data Type": [[497, "use-the-new-data-type"]], "User Code Example": [[549, "user-code-example"]], "User code example": [[549, "id1"]], "User-facing APIs": [[532, "user-facing-apis"]], "Using a Fixed alpha": [[480, "using-a-fixed-alpha"]], "Using a fixed alpha": [[552, "using-a-fixed-alpha"]], "Validated Hardware Environment": [[534, "validated-hardware-environment"]], "Validated Knowledge Distillation Examples": [[555, "validated-knowledge-distillation-examples"]], "Validated Models": [[475, "validated-models"], [552, "validated-models"], [555, "validated-models"]], "Validated ONNX QDQ INT8 Models on Multiple Hardware through ONNX Runtime": [[555, "validated-onnx-qdq-int8-models-on-multiple-hardware-through-onnx-runtime"]], "Validated Pruning Examples": [[555, "validated-pruning-examples"]], "Validated Quantization Examples": [[555, "validated-quantization-examples"]], "Validated Software Environment": [[534, "validated-software-environment"]], "Version mapping between Intel Neural Compressor to Gaudi Software Stack": [[486, "version-mapping-between-intel-neural-compressor-to-gaudi-software-stack"]], "WOQ Algorithms Tuning": [[549, "woq-algorithms-tuning"]], "Weight Only Quantization": [[488, "weight-only-quantization"]], "Weight Only Quantization (WOQ)": [[549, "weight-only-quantization-woq"]], "Weight-Only Large Language Model Loading (LLMs)": [[494, "weight-only-large-language-model-loading-llms"]], "What\u2019s New": [[494, "what-s-new"]], "With Accuracy Aware Tuning": [[479, "with-accuracy-aware-tuning"]], "Without Accuracy Aware Tuning": [[479, "without-accuracy-aware-tuning"]], "Workflow": [[524, "workflow"]], "Workflows": [[485, "workflows"]], "Working Flow": [[495, "working-flow"], [546, "working-flow"]], "Working with Autotune": [[482, "working-with-autotune"]], "Working with PyTorch Model": [[482, "working-with-pytorch-model"]], "Working with Tensorflow Model": [[482, "working-with-tensorflow-model"]], "neural_compressor": [[226, "module-neural_compressor"]], "neural_compressor.adaptor.mxnet_utils": [[0, "module-neural_compressor.adaptor.mxnet_utils"]], "neural_compressor.adaptor.mxnet_utils.util": [[1, "module-neural_compressor.adaptor.mxnet_utils.util"]], "neural_compressor.adaptor.ox_utils": [[4, "module-neural_compressor.adaptor.ox_utils"]], "neural_compressor.adaptor.ox_utils.calibration": [[2, "module-neural_compressor.adaptor.ox_utils.calibration"]], 
"neural_compressor.adaptor.ox_utils.calibrator": [[3, "module-neural_compressor.adaptor.ox_utils.calibrator"]], "neural_compressor.adaptor.ox_utils.operators": [[16, "module-neural_compressor.adaptor.ox_utils.operators"]], "neural_compressor.adaptor.ox_utils.operators.activation": [[5, "module-neural_compressor.adaptor.ox_utils.operators.activation"]], "neural_compressor.adaptor.ox_utils.operators.argmax": [[6, "module-neural_compressor.adaptor.ox_utils.operators.argmax"]], "neural_compressor.adaptor.ox_utils.operators.attention": [[7, "module-neural_compressor.adaptor.ox_utils.operators.attention"]], "neural_compressor.adaptor.ox_utils.operators.binary_op": [[8, "module-neural_compressor.adaptor.ox_utils.operators.binary_op"]], "neural_compressor.adaptor.ox_utils.operators.concat": [[9, "module-neural_compressor.adaptor.ox_utils.operators.concat"]], "neural_compressor.adaptor.ox_utils.operators.conv": [[10, "module-neural_compressor.adaptor.ox_utils.operators.conv"]], "neural_compressor.adaptor.ox_utils.operators.direct_q8": [[11, "module-neural_compressor.adaptor.ox_utils.operators.direct_q8"]], "neural_compressor.adaptor.ox_utils.operators.embed_layernorm": [[12, "module-neural_compressor.adaptor.ox_utils.operators.embed_layernorm"]], "neural_compressor.adaptor.ox_utils.operators.gather": [[13, "module-neural_compressor.adaptor.ox_utils.operators.gather"]], "neural_compressor.adaptor.ox_utils.operators.gavgpool": [[14, "module-neural_compressor.adaptor.ox_utils.operators.gavgpool"]], "neural_compressor.adaptor.ox_utils.operators.gemm": [[15, "module-neural_compressor.adaptor.ox_utils.operators.gemm"]], "neural_compressor.adaptor.ox_utils.operators.lstm": [[17, "module-neural_compressor.adaptor.ox_utils.operators.lstm"]], "neural_compressor.adaptor.ox_utils.operators.matmul": [[18, "module-neural_compressor.adaptor.ox_utils.operators.matmul"]], "neural_compressor.adaptor.ox_utils.operators.maxpool": [[19, "module-neural_compressor.adaptor.ox_utils.operators.maxpool"]], "neural_compressor.adaptor.ox_utils.operators.norm": [[20, "module-neural_compressor.adaptor.ox_utils.operators.norm"]], "neural_compressor.adaptor.ox_utils.operators.ops": [[21, "module-neural_compressor.adaptor.ox_utils.operators.ops"]], "neural_compressor.adaptor.ox_utils.operators.pad": [[22, "module-neural_compressor.adaptor.ox_utils.operators.pad"]], "neural_compressor.adaptor.ox_utils.operators.pooling": [[23, "module-neural_compressor.adaptor.ox_utils.operators.pooling"]], "neural_compressor.adaptor.ox_utils.operators.reduce": [[24, "module-neural_compressor.adaptor.ox_utils.operators.reduce"]], "neural_compressor.adaptor.ox_utils.operators.resize": [[25, "module-neural_compressor.adaptor.ox_utils.operators.resize"]], "neural_compressor.adaptor.ox_utils.operators.split": [[26, "module-neural_compressor.adaptor.ox_utils.operators.split"]], "neural_compressor.adaptor.ox_utils.operators.unary_op": [[27, "module-neural_compressor.adaptor.ox_utils.operators.unary_op"]], "neural_compressor.adaptor.ox_utils.quantizer": [[28, "module-neural_compressor.adaptor.ox_utils.quantizer"]], "neural_compressor.adaptor.ox_utils.smooth_quant": [[29, "module-neural_compressor.adaptor.ox_utils.smooth_quant"]], "neural_compressor.adaptor.ox_utils.util": [[30, "module-neural_compressor.adaptor.ox_utils.util"]], "neural_compressor.adaptor.ox_utils.weight_only": [[31, "module-neural_compressor.adaptor.ox_utils.weight_only"]], "neural_compressor.adaptor.tensorflow": [[32, "module-neural_compressor.adaptor.tensorflow"]], 
"neural_compressor.adaptor.tf_utils": [[96, "module-neural_compressor.adaptor.tf_utils"]], "neural_compressor.adaptor.tf_utils.graph_converter": [[33, "module-neural_compressor.adaptor.tf_utils.graph_converter"]], "neural_compressor.adaptor.tf_utils.graph_converter_without_calib": [[34, "module-neural_compressor.adaptor.tf_utils.graph_converter_without_calib"]], "neural_compressor.adaptor.tf_utils.graph_rewriter": [[72, "module-neural_compressor.adaptor.tf_utils.graph_rewriter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16": [[37, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert": [[35, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[36, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic": [[61, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd": [[38, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout": [[39, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu": [[40, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random": [[41, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const": [[42, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction": [[43, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd": [[44, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer": [[45, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape": [[46, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm": [[47, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant": [[48, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add": [[49, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul": [[50, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math": [[51, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn": [[52, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in": [[53, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu": [[54, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm": [[55, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv": [[56, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[57, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose": [[58, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer": [[59, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass": [[60, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node": [[62, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu": [[63, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize": [[64, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes": [[65, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm": [[66, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input": [[67, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes": [[68, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes": [[69, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer": [[70, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base": [[71, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8": [[80, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8"]], 
"neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant": [[73, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value": [[74, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib": [[75, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[76, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize": [[77, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[78, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize": [[79, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer": [[81, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter": [[82, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse": [[83, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert": [[84, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation": [[85, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx": [[86, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph": [[87, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node": [[88, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema": [[89, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils": [[90, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq": [[91, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern": [[92, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq": [[93, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern": [[94, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern"]], "neural_compressor.adaptor.tf_utils.graph_util": [[95, 
"module-neural_compressor.adaptor.tf_utils.graph_util"]], "neural_compressor.adaptor.tf_utils.quantize_graph": [[97, "module-neural_compressor.adaptor.tf_utils.quantize_graph"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat": [[99, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize": [[98, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config": [[100, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper": [[101, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers": [[102, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer": [[103, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add": [[104, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base": [[105, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn": [[106, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper": [[107, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq": [[115, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn": [[108, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2": [[109, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv": [[110, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv": [[111, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in": [[112, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul": [[113, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling": [[114, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq": [[116, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base": [[117, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn": [[118, 
"module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2": [[119, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv": [[120, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu": [[121, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul": [[122, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling": [[123, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph_common": [[124, "module-neural_compressor.adaptor.tf_utils.quantize_graph_common"]], "neural_compressor.adaptor.tf_utils.smooth_quant_calibration": [[125, "module-neural_compressor.adaptor.tf_utils.smooth_quant_calibration"]], "neural_compressor.adaptor.tf_utils.smooth_quant_scaler": [[126, "module-neural_compressor.adaptor.tf_utils.smooth_quant_scaler"]], "neural_compressor.adaptor.tf_utils.tf2onnx_converter": [[127, "module-neural_compressor.adaptor.tf_utils.tf2onnx_converter"]], "neural_compressor.adaptor.tf_utils.transform_graph": [[130, "module-neural_compressor.adaptor.tf_utils.transform_graph"]], "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction": [[128, "module-neural_compressor.adaptor.tf_utils.transform_graph.bias_correction"]], "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base": [[129, "module-neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base"]], "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging": [[131, "module-neural_compressor.adaptor.tf_utils.transform_graph.insert_logging"]], "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat": [[132, "module-neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat"]], "neural_compressor.adaptor.tf_utils.util": [[133, "module-neural_compressor.adaptor.tf_utils.util"]], "neural_compressor.adaptor.torch_utils": [[136, "module-neural_compressor.adaptor.torch_utils"]], "neural_compressor.adaptor.torch_utils.bf16_convert": [[134, "module-neural_compressor.adaptor.torch_utils.bf16_convert"]], "neural_compressor.adaptor.torch_utils.hawq_metric": [[135, "module-neural_compressor.adaptor.torch_utils.hawq_metric"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant": [[137, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle": [[138, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize": [[139, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load": [[140, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils": [[141, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.utils"]], "neural_compressor.adaptor.torch_utils.model_wrapper": [[142, "module-neural_compressor.adaptor.torch_utils.model_wrapper"]], 
"neural_compressor.adaptor.torch_utils.pattern_detector": [[143, "module-neural_compressor.adaptor.torch_utils.pattern_detector"]], "neural_compressor.adaptor.torch_utils.symbolic_trace": [[144, "module-neural_compressor.adaptor.torch_utils.symbolic_trace"]], "neural_compressor.adaptor.torch_utils.util": [[145, "module-neural_compressor.adaptor.torch_utils.util"]], "neural_compressor.algorithm": [[148, "module-neural_compressor.algorithm"]], "neural_compressor.algorithm.algorithm": [[146, "module-neural_compressor.algorithm.algorithm"]], "neural_compressor.algorithm.fast_bias_correction": [[147, "module-neural_compressor.algorithm.fast_bias_correction"]], "neural_compressor.algorithm.smooth_quant": [[149, "module-neural_compressor.algorithm.smooth_quant"]], "neural_compressor.algorithm.weight_correction": [[150, "module-neural_compressor.algorithm.weight_correction"]], "neural_compressor.benchmark": [[151, "module-neural_compressor.benchmark"]], "neural_compressor.common": [[155, "module-neural_compressor.common"]], "neural_compressor.common.base_config": [[152, "module-neural_compressor.common.base_config"]], "neural_compressor.common.base_tuning": [[153, "module-neural_compressor.common.base_tuning"]], "neural_compressor.common.benchmark": [[154, "module-neural_compressor.common.benchmark"]], "neural_compressor.common.tuning_param": [[156, "module-neural_compressor.common.tuning_param"]], "neural_compressor.common.utils": [[158, "module-neural_compressor.common.utils"]], "neural_compressor.common.utils.constants": [[157, "module-neural_compressor.common.utils.constants"]], "neural_compressor.common.utils.logger": [[159, "module-neural_compressor.common.utils.logger"]], "neural_compressor.common.utils.save_load": [[160, "module-neural_compressor.common.utils.save_load"]], "neural_compressor.common.utils.utility": [[161, "module-neural_compressor.common.utils.utility"]], "neural_compressor.compression.callbacks": [[162, "module-neural_compressor.compression.callbacks"]], "neural_compressor.compression.distillation": [[164, "module-neural_compressor.compression.distillation"]], "neural_compressor.compression.distillation.criterions": [[163, "module-neural_compressor.compression.distillation.criterions"]], "neural_compressor.compression.distillation.optimizers": [[165, "module-neural_compressor.compression.distillation.optimizers"]], "neural_compressor.compression.distillation.utility": [[166, "module-neural_compressor.compression.distillation.utility"]], "neural_compressor.compression.hpo": [[167, "module-neural_compressor.compression.hpo"]], "neural_compressor.compression.hpo.sa_optimizer": [[168, "module-neural_compressor.compression.hpo.sa_optimizer"]], "neural_compressor.compression.pruner": [[170, "module-neural_compressor.compression.pruner"]], "neural_compressor.compression.pruner.criteria": [[169, "module-neural_compressor.compression.pruner.criteria"]], "neural_compressor.compression.pruner.model_slim": [[172, "module-neural_compressor.compression.pruner.model_slim"]], "neural_compressor.compression.pruner.model_slim.auto_slim": [[171, "module-neural_compressor.compression.pruner.model_slim.auto_slim"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer": [[173, "module-neural_compressor.compression.pruner.model_slim.pattern_analyzer"]], "neural_compressor.compression.pruner.model_slim.weight_slim": [[174, "module-neural_compressor.compression.pruner.model_slim.weight_slim"]], "neural_compressor.compression.pruner.patterns": [[176, 
"module-neural_compressor.compression.pruner.patterns"]], "neural_compressor.compression.pruner.patterns.base": [[175, "module-neural_compressor.compression.pruner.patterns.base"]], "neural_compressor.compression.pruner.patterns.mha": [[177, "module-neural_compressor.compression.pruner.patterns.mha"]], "neural_compressor.compression.pruner.patterns.ninm": [[178, "module-neural_compressor.compression.pruner.patterns.ninm"]], "neural_compressor.compression.pruner.patterns.nxm": [[179, "module-neural_compressor.compression.pruner.patterns.nxm"]], "neural_compressor.compression.pruner.pruners": [[183, "module-neural_compressor.compression.pruner.pruners"]], "neural_compressor.compression.pruner.pruners.base": [[180, "module-neural_compressor.compression.pruner.pruners.base"]], "neural_compressor.compression.pruner.pruners.basic": [[181, "module-neural_compressor.compression.pruner.pruners.basic"]], "neural_compressor.compression.pruner.pruners.block_mask": [[182, "module-neural_compressor.compression.pruner.pruners.block_mask"]], "neural_compressor.compression.pruner.pruners.mha": [[184, "module-neural_compressor.compression.pruner.pruners.mha"]], "neural_compressor.compression.pruner.pruners.pattern_lock": [[185, "module-neural_compressor.compression.pruner.pruners.pattern_lock"]], "neural_compressor.compression.pruner.pruners.progressive": [[186, "module-neural_compressor.compression.pruner.pruners.progressive"]], "neural_compressor.compression.pruner.pruners.retrain_free": [[187, "module-neural_compressor.compression.pruner.pruners.retrain_free"]], "neural_compressor.compression.pruner.pruning": [[188, "module-neural_compressor.compression.pruner.pruning"]], "neural_compressor.compression.pruner.regs": [[189, "module-neural_compressor.compression.pruner.regs"]], "neural_compressor.compression.pruner.schedulers": [[190, "module-neural_compressor.compression.pruner.schedulers"]], "neural_compressor.compression.pruner.tf_criteria": [[191, "module-neural_compressor.compression.pruner.tf_criteria"]], "neural_compressor.compression.pruner.utils": [[192, "module-neural_compressor.compression.pruner.utils"]], "neural_compressor.compression.pruner.wanda": [[193, "module-neural_compressor.compression.pruner.wanda"]], "neural_compressor.compression.pruner.wanda.utils": [[194, "module-neural_compressor.compression.pruner.wanda.utils"]], "neural_compressor.config": [[195, "module-neural_compressor.config"]], "neural_compressor.contrib": [[196, "module-neural_compressor.contrib"]], "neural_compressor.contrib.strategy": [[197, "module-neural_compressor.contrib.strategy"]], "neural_compressor.contrib.strategy.sigopt": [[198, "module-neural_compressor.contrib.strategy.sigopt"]], "neural_compressor.contrib.strategy.tpe": [[199, "module-neural_compressor.contrib.strategy.tpe"]], "neural_compressor.data": [[220, "module-neural_compressor.data"]], "neural_compressor.data.dataloaders.base_dataloader": [[200, "module-neural_compressor.data.dataloaders.base_dataloader"]], "neural_compressor.data.dataloaders.dataloader": [[201, "module-neural_compressor.data.dataloaders.dataloader"]], "neural_compressor.data.dataloaders.default_dataloader": [[202, "module-neural_compressor.data.dataloaders.default_dataloader"]], "neural_compressor.data.dataloaders.fetcher": [[203, "module-neural_compressor.data.dataloaders.fetcher"]], "neural_compressor.data.dataloaders.mxnet_dataloader": [[204, "module-neural_compressor.data.dataloaders.mxnet_dataloader"]], "neural_compressor.data.dataloaders.onnxrt_dataloader": [[205, 
"module-neural_compressor.data.dataloaders.onnxrt_dataloader"]], "neural_compressor.data.dataloaders.pytorch_dataloader": [[206, "module-neural_compressor.data.dataloaders.pytorch_dataloader"]], "neural_compressor.data.dataloaders.sampler": [[207, "module-neural_compressor.data.dataloaders.sampler"]], "neural_compressor.data.dataloaders.tensorflow_dataloader": [[208, "module-neural_compressor.data.dataloaders.tensorflow_dataloader"]], "neural_compressor.data.datasets": [[215, "module-neural_compressor.data.datasets"]], "neural_compressor.data.datasets.bert_dataset": [[209, "module-neural_compressor.data.datasets.bert_dataset"]], "neural_compressor.data.datasets.coco_dataset": [[210, "module-neural_compressor.data.datasets.coco_dataset"]], "neural_compressor.data.datasets.dataset": [[211, "module-neural_compressor.data.datasets.dataset"]], "neural_compressor.data.datasets.dummy_dataset": [[212, "module-neural_compressor.data.datasets.dummy_dataset"]], "neural_compressor.data.datasets.dummy_dataset_v2": [[213, "module-neural_compressor.data.datasets.dummy_dataset_v2"]], "neural_compressor.data.datasets.imagenet_dataset": [[214, "module-neural_compressor.data.datasets.imagenet_dataset"]], "neural_compressor.data.datasets.style_transfer_dataset": [[216, "module-neural_compressor.data.datasets.style_transfer_dataset"]], "neural_compressor.data.filters": [[219, "module-neural_compressor.data.filters"]], "neural_compressor.data.filters.coco_filter": [[217, "module-neural_compressor.data.filters.coco_filter"]], "neural_compressor.data.filters.filter": [[218, "module-neural_compressor.data.filters.filter"]], "neural_compressor.data.transforms": [[222, "module-neural_compressor.data.transforms"]], "neural_compressor.data.transforms.imagenet_transform": [[221, "module-neural_compressor.data.transforms.imagenet_transform"]], "neural_compressor.data.transforms.postprocess": [[223, "module-neural_compressor.data.transforms.postprocess"]], "neural_compressor.data.transforms.tokenization": [[224, "module-neural_compressor.data.transforms.tokenization"]], "neural_compressor.data.transforms.transform": [[225, "module-neural_compressor.data.transforms.transform"]], "neural_compressor.metric": [[233, "module-neural_compressor.metric"]], "neural_compressor.metric.bleu": [[227, "module-neural_compressor.metric.bleu"]], "neural_compressor.metric.bleu_util": [[228, "module-neural_compressor.metric.bleu_util"]], "neural_compressor.metric.coco_label_map": [[229, "module-neural_compressor.metric.coco_label_map"]], "neural_compressor.metric.coco_tools": [[230, "module-neural_compressor.metric.coco_tools"]], "neural_compressor.metric.evaluate_squad": [[231, "module-neural_compressor.metric.evaluate_squad"]], "neural_compressor.metric.f1": [[232, "module-neural_compressor.metric.f1"]], "neural_compressor.metric.metric": [[234, "module-neural_compressor.metric.metric"]], "neural_compressor.mix_precision": [[235, "module-neural_compressor.mix_precision"]], "neural_compressor.model": [[237, "module-neural_compressor.model"]], "neural_compressor.model.base_model": [[236, "module-neural_compressor.model.base_model"]], "neural_compressor.model.keras_model": [[238, "module-neural_compressor.model.keras_model"]], "neural_compressor.model.model": [[239, "module-neural_compressor.model.model"]], "neural_compressor.model.mxnet_model": [[240, "module-neural_compressor.model.mxnet_model"]], "neural_compressor.model.nets_factory": [[241, "module-neural_compressor.model.nets_factory"]], "neural_compressor.model.onnx_model": [[242, 
"module-neural_compressor.model.onnx_model"]], "neural_compressor.model.tensorflow_model": [[243, "module-neural_compressor.model.tensorflow_model"]], "neural_compressor.model.torch_model": [[244, "module-neural_compressor.model.torch_model"]], "neural_compressor.objective": [[245, "module-neural_compressor.objective"]], "neural_compressor.profiling": [[246, "module-neural_compressor.profiling"]], "neural_compressor.profiling.parser.factory": [[247, "module-neural_compressor.profiling.parser.factory"]], "neural_compressor.profiling.parser.onnx_parser.factory": [[248, "module-neural_compressor.profiling.parser.onnx_parser.factory"]], "neural_compressor.profiling.parser.onnx_parser.parser": [[249, "module-neural_compressor.profiling.parser.onnx_parser.parser"]], "neural_compressor.profiling.parser.parser": [[250, "module-neural_compressor.profiling.parser.parser"]], "neural_compressor.profiling.parser.result": [[251, "module-neural_compressor.profiling.parser.result"]], "neural_compressor.profiling.parser.tensorflow_parser.factory": [[252, "module-neural_compressor.profiling.parser.tensorflow_parser.factory"]], "neural_compressor.profiling.parser.tensorflow_parser.parser": [[253, "module-neural_compressor.profiling.parser.tensorflow_parser.parser"]], "neural_compressor.profiling.profiler.factory": [[254, "module-neural_compressor.profiling.profiler.factory"]], "neural_compressor.profiling.profiler.onnxrt_profiler.factory": [[255, "module-neural_compressor.profiling.profiler.onnxrt_profiler.factory"]], "neural_compressor.profiling.profiler.onnxrt_profiler.profiler": [[256, "module-neural_compressor.profiling.profiler.onnxrt_profiler.profiler"]], "neural_compressor.profiling.profiler.onnxrt_profiler.utils": [[257, "module-neural_compressor.profiling.profiler.onnxrt_profiler.utils"]], "neural_compressor.profiling.profiler.profiler": [[258, "module-neural_compressor.profiling.profiler.profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler.factory": [[259, "module-neural_compressor.profiling.profiler.tensorflow_profiler.factory"]], "neural_compressor.profiling.profiler.tensorflow_profiler.profiler": [[260, "module-neural_compressor.profiling.profiler.tensorflow_profiler.profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler.utils": [[261, "module-neural_compressor.profiling.profiler.tensorflow_profiler.utils"]], "neural_compressor.quantization": [[262, "module-neural_compressor.quantization"]], "neural_compressor.strategy": [[270, "module-neural_compressor.strategy"]], "neural_compressor.strategy.auto": [[263, "module-neural_compressor.strategy.auto"]], "neural_compressor.strategy.auto_mixed_precision": [[264, "module-neural_compressor.strategy.auto_mixed_precision"]], "neural_compressor.strategy.basic": [[265, "module-neural_compressor.strategy.basic"]], "neural_compressor.strategy.bayesian": [[266, "module-neural_compressor.strategy.bayesian"]], "neural_compressor.strategy.conservative": [[267, "module-neural_compressor.strategy.conservative"]], "neural_compressor.strategy.exhaustive": [[268, "module-neural_compressor.strategy.exhaustive"]], "neural_compressor.strategy.hawq_v2": [[269, "module-neural_compressor.strategy.hawq_v2"]], "neural_compressor.strategy.mse": [[271, "module-neural_compressor.strategy.mse"]], "neural_compressor.strategy.mse_v2": [[272, "module-neural_compressor.strategy.mse_v2"]], "neural_compressor.strategy.random": [[273, "module-neural_compressor.strategy.random"]], "neural_compressor.strategy.strategy": [[274, 
"module-neural_compressor.strategy.strategy"]], "neural_compressor.strategy.utils": [[276, "module-neural_compressor.strategy.utils"]], "neural_compressor.strategy.utils.constant": [[275, "module-neural_compressor.strategy.utils.constant"]], "neural_compressor.strategy.utils.tuning_sampler": [[277, "module-neural_compressor.strategy.utils.tuning_sampler"]], "neural_compressor.strategy.utils.tuning_space": [[278, "module-neural_compressor.strategy.utils.tuning_space"]], "neural_compressor.strategy.utils.tuning_structs": [[279, "module-neural_compressor.strategy.utils.tuning_structs"]], "neural_compressor.strategy.utils.utility": [[280, "module-neural_compressor.strategy.utils.utility"]], "neural_compressor.template.api_doc_example": [[281, "module-neural_compressor.template.api_doc_example"]], "neural_compressor.tensorflow": [[290, "module-neural_compressor.tensorflow"]], "neural_compressor.tensorflow.algorithms": [[282, "module-neural_compressor.tensorflow.algorithms"]], "neural_compressor.tensorflow.algorithms.smoother": [[285, "module-neural_compressor.tensorflow.algorithms.smoother"]], "neural_compressor.tensorflow.algorithms.smoother.calibration": [[283, "module-neural_compressor.tensorflow.algorithms.smoother.calibration"]], "neural_compressor.tensorflow.algorithms.smoother.core": [[284, "module-neural_compressor.tensorflow.algorithms.smoother.core"]], "neural_compressor.tensorflow.algorithms.smoother.scaler": [[286, "module-neural_compressor.tensorflow.algorithms.smoother.scaler"]], "neural_compressor.tensorflow.algorithms.static_quant": [[287, "module-neural_compressor.tensorflow.algorithms.static_quant"]], "neural_compressor.tensorflow.algorithms.static_quant.keras": [[288, "module-neural_compressor.tensorflow.algorithms.static_quant.keras"]], "neural_compressor.tensorflow.algorithms.static_quant.tensorflow": [[289, "module-neural_compressor.tensorflow.algorithms.static_quant.tensorflow"]], "neural_compressor.tensorflow.keras": [[291, "module-neural_compressor.tensorflow.keras"]], "neural_compressor.tensorflow.keras.layers": [[295, "module-neural_compressor.tensorflow.keras.layers"]], "neural_compressor.tensorflow.keras.layers.conv2d": [[292, "module-neural_compressor.tensorflow.keras.layers.conv2d"]], "neural_compressor.tensorflow.keras.layers.dense": [[293, "module-neural_compressor.tensorflow.keras.layers.dense"]], "neural_compressor.tensorflow.keras.layers.depthwise_conv2d": [[294, "module-neural_compressor.tensorflow.keras.layers.depthwise_conv2d"]], "neural_compressor.tensorflow.keras.layers.layer_initializer": [[296, "module-neural_compressor.tensorflow.keras.layers.layer_initializer"]], "neural_compressor.tensorflow.keras.layers.pool2d": [[297, "module-neural_compressor.tensorflow.keras.layers.pool2d"]], "neural_compressor.tensorflow.keras.layers.separable_conv2d": [[298, "module-neural_compressor.tensorflow.keras.layers.separable_conv2d"]], "neural_compressor.tensorflow.keras.quantization": [[300, "module-neural_compressor.tensorflow.keras.quantization"]], "neural_compressor.tensorflow.keras.quantization.config": [[299, "module-neural_compressor.tensorflow.keras.quantization.config"]], "neural_compressor.tensorflow.quantization": [[304, "module-neural_compressor.tensorflow.quantization"]], "neural_compressor.tensorflow.quantization.algorithm_entry": [[301, "module-neural_compressor.tensorflow.quantization.algorithm_entry"]], "neural_compressor.tensorflow.quantization.autotune": [[302, "module-neural_compressor.tensorflow.quantization.autotune"]], 
"neural_compressor.tensorflow.quantization.config": [[303, "module-neural_compressor.tensorflow.quantization.config"]], "neural_compressor.tensorflow.quantization.quantize": [[305, "module-neural_compressor.tensorflow.quantization.quantize"]], "neural_compressor.tensorflow.quantization.utils": [[361, "module-neural_compressor.tensorflow.quantization.utils"]], "neural_compressor.tensorflow.quantization.utils.graph_converter": [[306, "module-neural_compressor.tensorflow.quantization.utils.graph_converter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter": [[344, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16": [[309, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert": [[307, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[308, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic": [[333, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd": [[310, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout": [[311, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu": [[312, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random": [[313, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const": [[314, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction": [[315, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd": [[316, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer": [[317, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape": [[318, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm": [[319, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant": [[320, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add": [[321, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul": [[322, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math": [[323, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn": [[324, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in": [[325, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu": [[326, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm": [[327, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv": [[328, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[329, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose": [[330, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer": [[331, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass": [[332, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node": [[334, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu": [[335, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize": [[336, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes": [[337, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm": [[338, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input": [[339, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes": [[340, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes": [[341, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer": [[342, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base": [[343, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8": [[351, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant": [[345, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value": [[346, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[347, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize": [[348, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[349, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize": [[350, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer": [[352, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter": [[353, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse": [[354, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation": [[355, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq": [[356, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern": [[357, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq": [[358, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern": [[359, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern"]], "neural_compressor.tensorflow.quantization.utils.graph_util": [[360, "module-neural_compressor.tensorflow.quantization.utils.graph_util"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph": [[362, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq": [[370, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn": [[363, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2": [[364, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv": [[365, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv": [[366, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in": [[367, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul": [[368, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling": [[369, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq": [[371, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base": [[372, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn": [[373, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2": [[374, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv": [[375, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu": [[376, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul": [[377, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul"]], 
"neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling": [[378, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph_common": [[379, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph_common"]], "neural_compressor.tensorflow.quantization.utils.transform_graph": [[382, "module-neural_compressor.tensorflow.quantization.utils.transform_graph"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction": [[380, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base": [[381, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging": [[383, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat": [[384, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat"]], "neural_compressor.tensorflow.quantization.utils.utility": [[385, "module-neural_compressor.tensorflow.quantization.utils.utility"]], "neural_compressor.tensorflow.utils": [[388, "module-neural_compressor.tensorflow.utils"]], "neural_compressor.tensorflow.utils.constants": [[386, "module-neural_compressor.tensorflow.utils.constants"]], "neural_compressor.tensorflow.utils.data": [[387, "module-neural_compressor.tensorflow.utils.data"]], "neural_compressor.tensorflow.utils.model": [[389, "module-neural_compressor.tensorflow.utils.model"]], "neural_compressor.tensorflow.utils.model_wrappers": [[390, "module-neural_compressor.tensorflow.utils.model_wrappers"]], "neural_compressor.tensorflow.utils.utility": [[391, "module-neural_compressor.tensorflow.utils.utility"]], "neural_compressor.torch": [[436, "module-neural_compressor.torch"]], "neural_compressor.torch.algorithms": [[394, "module-neural_compressor.torch.algorithms"]], "neural_compressor.torch.algorithms.base_algorithm": [[392, "module-neural_compressor.torch.algorithms.base_algorithm"]], "neural_compressor.torch.algorithms.fp8_quant.utils.logger": [[393, "module-neural_compressor.torch.algorithms.fp8_quant.utils.logger"]], "neural_compressor.torch.algorithms.layer_wise": [[395, "module-neural_compressor.torch.algorithms.layer_wise"]], "neural_compressor.torch.algorithms.layer_wise.load": [[396, "module-neural_compressor.torch.algorithms.layer_wise.load"]], "neural_compressor.torch.algorithms.layer_wise.modified_pickle": [[397, "module-neural_compressor.torch.algorithms.layer_wise.modified_pickle"]], "neural_compressor.torch.algorithms.layer_wise.utils": [[398, "module-neural_compressor.torch.algorithms.layer_wise.utils"]], "neural_compressor.torch.algorithms.mixed_precision": [[400, "module-neural_compressor.torch.algorithms.mixed_precision"]], "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert": [[399, "module-neural_compressor.torch.algorithms.mixed_precision.half_precision_convert"]], "neural_compressor.torch.algorithms.mixed_precision.module_wrappers": [[401, "module-neural_compressor.torch.algorithms.mixed_precision.module_wrappers"]], "neural_compressor.torch.algorithms.mx_quant": [[402, "module-neural_compressor.torch.algorithms.mx_quant"]], 
"neural_compressor.torch.algorithms.mx_quant.mx": [[403, "module-neural_compressor.torch.algorithms.mx_quant.mx"]], "neural_compressor.torch.algorithms.mx_quant.utils": [[404, "module-neural_compressor.torch.algorithms.mx_quant.utils"]], "neural_compressor.torch.algorithms.pt2e_quant": [[407, "module-neural_compressor.torch.algorithms.pt2e_quant"]], "neural_compressor.torch.algorithms.pt2e_quant.core": [[405, "module-neural_compressor.torch.algorithms.pt2e_quant.core"]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter": [[406, "module-neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter"]], "neural_compressor.torch.algorithms.pt2e_quant.save_load": [[408, "module-neural_compressor.torch.algorithms.pt2e_quant.save_load"]], "neural_compressor.torch.algorithms.pt2e_quant.utility": [[409, "module-neural_compressor.torch.algorithms.pt2e_quant.utility"]], "neural_compressor.torch.algorithms.smooth_quant": [[410, "module-neural_compressor.torch.algorithms.smooth_quant"]], "neural_compressor.torch.algorithms.smooth_quant.save_load": [[411, "module-neural_compressor.torch.algorithms.smooth_quant.save_load"]], "neural_compressor.torch.algorithms.smooth_quant.smooth_quant": [[412, "module-neural_compressor.torch.algorithms.smooth_quant.smooth_quant"]], "neural_compressor.torch.algorithms.smooth_quant.utility": [[413, "module-neural_compressor.torch.algorithms.smooth_quant.utility"]], "neural_compressor.torch.algorithms.static_quant": [[414, "module-neural_compressor.torch.algorithms.static_quant"]], "neural_compressor.torch.algorithms.static_quant.save_load": [[415, "module-neural_compressor.torch.algorithms.static_quant.save_load"]], "neural_compressor.torch.algorithms.static_quant.static_quant": [[416, "module-neural_compressor.torch.algorithms.static_quant.static_quant"]], "neural_compressor.torch.algorithms.static_quant.utility": [[417, "module-neural_compressor.torch.algorithms.static_quant.utility"]], "neural_compressor.torch.algorithms.weight_only": [[428, "module-neural_compressor.torch.algorithms.weight_only"]], "neural_compressor.torch.algorithms.weight_only.autoround": [[418, "module-neural_compressor.torch.algorithms.weight_only.autoround"]], "neural_compressor.torch.algorithms.weight_only.awq": [[419, "module-neural_compressor.torch.algorithms.weight_only.awq"]], "neural_compressor.torch.algorithms.weight_only.gptq": [[420, "module-neural_compressor.torch.algorithms.weight_only.gptq"]], "neural_compressor.torch.algorithms.weight_only.hqq": [[424, "module-neural_compressor.torch.algorithms.weight_only.hqq"]], "neural_compressor.torch.algorithms.weight_only.hqq.bitpack": [[421, "module-neural_compressor.torch.algorithms.weight_only.hqq.bitpack"]], "neural_compressor.torch.algorithms.weight_only.hqq.config": [[422, "module-neural_compressor.torch.algorithms.weight_only.hqq.config"]], "neural_compressor.torch.algorithms.weight_only.hqq.core": [[423, "module-neural_compressor.torch.algorithms.weight_only.hqq.core"]], "neural_compressor.torch.algorithms.weight_only.hqq.optimizer": [[425, "module-neural_compressor.torch.algorithms.weight_only.hqq.optimizer"]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor": [[426, "module-neural_compressor.torch.algorithms.weight_only.hqq.qtensor"]], "neural_compressor.torch.algorithms.weight_only.hqq.quantizer": [[427, "module-neural_compressor.torch.algorithms.weight_only.hqq.quantizer"]], "neural_compressor.torch.algorithms.weight_only.modules": [[429, 
"module-neural_compressor.torch.algorithms.weight_only.modules"]], "neural_compressor.torch.algorithms.weight_only.rtn": [[430, "module-neural_compressor.torch.algorithms.weight_only.rtn"]], "neural_compressor.torch.algorithms.weight_only.save_load": [[431, "module-neural_compressor.torch.algorithms.weight_only.save_load"]], "neural_compressor.torch.algorithms.weight_only.teq": [[432, "module-neural_compressor.torch.algorithms.weight_only.teq"]], "neural_compressor.torch.algorithms.weight_only.utility": [[433, "module-neural_compressor.torch.algorithms.weight_only.utility"]], "neural_compressor.torch.export": [[434, "module-neural_compressor.torch.export"]], "neural_compressor.torch.export.pt2e_export": [[435, "module-neural_compressor.torch.export.pt2e_export"]], "neural_compressor.torch.quantization": [[440, "module-neural_compressor.torch.quantization"]], "neural_compressor.torch.quantization.algorithm_entry": [[437, "module-neural_compressor.torch.quantization.algorithm_entry"]], "neural_compressor.torch.quantization.autotune": [[438, "module-neural_compressor.torch.quantization.autotune"]], "neural_compressor.torch.quantization.config": [[439, "module-neural_compressor.torch.quantization.config"]], "neural_compressor.torch.quantization.load_entry": [[441, "module-neural_compressor.torch.quantization.load_entry"]], "neural_compressor.torch.quantization.quantize": [[442, "module-neural_compressor.torch.quantization.quantize"]], "neural_compressor.torch.utils": [[447, "module-neural_compressor.torch.utils"]], "neural_compressor.torch.utils.auto_accelerator": [[443, "module-neural_compressor.torch.utils.auto_accelerator"]], "neural_compressor.torch.utils.bit_packer": [[444, "module-neural_compressor.torch.utils.bit_packer"]], "neural_compressor.torch.utils.constants": [[445, "module-neural_compressor.torch.utils.constants"]], "neural_compressor.torch.utils.environ": [[446, "module-neural_compressor.torch.utils.environ"]], "neural_compressor.torch.utils.utility": [[448, "module-neural_compressor.torch.utils.utility"]], "neural_compressor.training": [[449, "module-neural_compressor.training"]], "neural_compressor.transformers.quantization.utils": [[450, "module-neural_compressor.transformers.quantization.utils"]], "neural_compressor.transformers.utils": [[451, "module-neural_compressor.transformers.utils"]], "neural_compressor.transformers.utils.quantization_config": [[452, "module-neural_compressor.transformers.utils.quantization_config"]], "neural_compressor.utils": [[460, "module-neural_compressor.utils"]], "neural_compressor.utils.collect_layer_histogram": [[453, "module-neural_compressor.utils.collect_layer_histogram"]], "neural_compressor.utils.constant": [[454, "module-neural_compressor.utils.constant"]], "neural_compressor.utils.create_obj_from_config": [[455, "module-neural_compressor.utils.create_obj_from_config"]], "neural_compressor.utils.export": [[456, "module-neural_compressor.utils.export"]], "neural_compressor.utils.export.qlinear2qdq": [[457, "module-neural_compressor.utils.export.qlinear2qdq"]], "neural_compressor.utils.export.tf2onnx": [[458, "module-neural_compressor.utils.export.tf2onnx"]], "neural_compressor.utils.export.torch2onnx": [[459, "module-neural_compressor.utils.export.torch2onnx"]], "neural_compressor.utils.kl_divergence": [[461, "module-neural_compressor.utils.kl_divergence"]], "neural_compressor.utils.load_huggingface": [[462, "module-neural_compressor.utils.load_huggingface"]], "neural_compressor.utils.logger": [[463, 
"module-neural_compressor.utils.logger"]], "neural_compressor.utils.options": [[464, "module-neural_compressor.utils.options"]], "neural_compressor.utils.pytorch": [[465, "module-neural_compressor.utils.pytorch"]], "neural_compressor.utils.utility": [[466, "module-neural_compressor.utils.utility"]], "neural_compressor.utils.weights_details": [[467, "module-neural_compressor.utils.weights_details"]], "neural_compressor.version": [[468, "module-neural_compressor.version"]], "}": [[145, "id3"]]}, "docnames": ["autoapi/neural_compressor/adaptor/mxnet_utils/index", "autoapi/neural_compressor/adaptor/mxnet_utils/util/index", "autoapi/neural_compressor/adaptor/ox_utils/calibration/index", "autoapi/neural_compressor/adaptor/ox_utils/calibrator/index", "autoapi/neural_compressor/adaptor/ox_utils/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/split/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index", "autoapi/neural_compressor/adaptor/ox_utils/quantizer/index", "autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index", "autoapi/neural_compressor/adaptor/ox_utils/util/index", "autoapi/neural_compressor/adaptor/ox_utils/weight_only/index", "autoapi/neural_compressor/adaptor/tensorflow/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index", 
"autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index", 
"autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_util/index", "autoapi/neural_compressor/adaptor/tf_utils/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index", 
"autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index", "autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index", "autoapi/neural_compressor/adaptor/tf_utils/util/index", "autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index", "autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index", "autoapi/neural_compressor/adaptor/torch_utils/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index", "autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index", "autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index", "autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index", "autoapi/neural_compressor/adaptor/torch_utils/util/index", "autoapi/neural_compressor/algorithm/algorithm/index", "autoapi/neural_compressor/algorithm/fast_bias_correction/index", "autoapi/neural_compressor/algorithm/index", "autoapi/neural_compressor/algorithm/smooth_quant/index", "autoapi/neural_compressor/algorithm/weight_correction/index", "autoapi/neural_compressor/benchmark/index", "autoapi/neural_compressor/common/base_config/index", "autoapi/neural_compressor/common/base_tuning/index", "autoapi/neural_compressor/common/benchmark/index", "autoapi/neural_compressor/common/index", "autoapi/neural_compressor/common/tuning_param/index", "autoapi/neural_compressor/common/utils/constants/index", "autoapi/neural_compressor/common/utils/index", "autoapi/neural_compressor/common/utils/logger/index", "autoapi/neural_compressor/common/utils/save_load/index", "autoapi/neural_compressor/common/utils/utility/index", "autoapi/neural_compressor/compression/callbacks/index", "autoapi/neural_compressor/compression/distillation/criterions/index", "autoapi/neural_compressor/compression/distillation/index", "autoapi/neural_compressor/compression/distillation/optimizers/index", "autoapi/neural_compressor/compression/distillation/utility/index", "autoapi/neural_compressor/compression/hpo/index", "autoapi/neural_compressor/compression/hpo/sa_optimizer/index", "autoapi/neural_compressor/compression/pruner/criteria/index", "autoapi/neural_compressor/compression/pruner/index", "autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index", "autoapi/neural_compressor/compression/pruner/model_slim/index", "autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index", 
"autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index", "autoapi/neural_compressor/compression/pruner/patterns/base/index", "autoapi/neural_compressor/compression/pruner/patterns/index", "autoapi/neural_compressor/compression/pruner/patterns/mha/index", "autoapi/neural_compressor/compression/pruner/patterns/ninm/index", "autoapi/neural_compressor/compression/pruner/patterns/nxm/index", "autoapi/neural_compressor/compression/pruner/pruners/base/index", "autoapi/neural_compressor/compression/pruner/pruners/basic/index", "autoapi/neural_compressor/compression/pruner/pruners/block_mask/index", "autoapi/neural_compressor/compression/pruner/pruners/index", "autoapi/neural_compressor/compression/pruner/pruners/mha/index", "autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index", "autoapi/neural_compressor/compression/pruner/pruners/progressive/index", "autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index", "autoapi/neural_compressor/compression/pruner/pruning/index", "autoapi/neural_compressor/compression/pruner/regs/index", "autoapi/neural_compressor/compression/pruner/schedulers/index", "autoapi/neural_compressor/compression/pruner/tf_criteria/index", "autoapi/neural_compressor/compression/pruner/utils/index", "autoapi/neural_compressor/compression/pruner/wanda/index", "autoapi/neural_compressor/compression/pruner/wanda/utils/index", "autoapi/neural_compressor/config/index", "autoapi/neural_compressor/contrib/index", "autoapi/neural_compressor/contrib/strategy/index", "autoapi/neural_compressor/contrib/strategy/sigopt/index", "autoapi/neural_compressor/contrib/strategy/tpe/index", "autoapi/neural_compressor/data/dataloaders/base_dataloader/index", "autoapi/neural_compressor/data/dataloaders/dataloader/index", "autoapi/neural_compressor/data/dataloaders/default_dataloader/index", "autoapi/neural_compressor/data/dataloaders/fetcher/index", "autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index", "autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index", "autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index", "autoapi/neural_compressor/data/dataloaders/sampler/index", "autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index", "autoapi/neural_compressor/data/datasets/bert_dataset/index", "autoapi/neural_compressor/data/datasets/coco_dataset/index", "autoapi/neural_compressor/data/datasets/dataset/index", "autoapi/neural_compressor/data/datasets/dummy_dataset/index", "autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index", "autoapi/neural_compressor/data/datasets/imagenet_dataset/index", "autoapi/neural_compressor/data/datasets/index", "autoapi/neural_compressor/data/datasets/style_transfer_dataset/index", "autoapi/neural_compressor/data/filters/coco_filter/index", "autoapi/neural_compressor/data/filters/filter/index", "autoapi/neural_compressor/data/filters/index", "autoapi/neural_compressor/data/index", "autoapi/neural_compressor/data/transforms/imagenet_transform/index", "autoapi/neural_compressor/data/transforms/index", "autoapi/neural_compressor/data/transforms/postprocess/index", "autoapi/neural_compressor/data/transforms/tokenization/index", "autoapi/neural_compressor/data/transforms/transform/index", "autoapi/neural_compressor/index", "autoapi/neural_compressor/metric/bleu/index", "autoapi/neural_compressor/metric/bleu_util/index", "autoapi/neural_compressor/metric/coco_label_map/index", "autoapi/neural_compressor/metric/coco_tools/index", 
"autoapi/neural_compressor/metric/evaluate_squad/index", "autoapi/neural_compressor/metric/f1/index", "autoapi/neural_compressor/metric/index", "autoapi/neural_compressor/metric/metric/index", "autoapi/neural_compressor/mix_precision/index", "autoapi/neural_compressor/model/base_model/index", "autoapi/neural_compressor/model/index", "autoapi/neural_compressor/model/keras_model/index", "autoapi/neural_compressor/model/model/index", "autoapi/neural_compressor/model/mxnet_model/index", "autoapi/neural_compressor/model/nets_factory/index", "autoapi/neural_compressor/model/onnx_model/index", "autoapi/neural_compressor/model/tensorflow_model/index", "autoapi/neural_compressor/model/torch_model/index", "autoapi/neural_compressor/objective/index", "autoapi/neural_compressor/profiling/index", "autoapi/neural_compressor/profiling/parser/factory/index", "autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index", "autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index", "autoapi/neural_compressor/profiling/parser/parser/index", "autoapi/neural_compressor/profiling/parser/result/index", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index", "autoapi/neural_compressor/profiling/profiler/factory/index", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index", "autoapi/neural_compressor/profiling/profiler/profiler/index", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index", "autoapi/neural_compressor/quantization/index", "autoapi/neural_compressor/strategy/auto/index", "autoapi/neural_compressor/strategy/auto_mixed_precision/index", "autoapi/neural_compressor/strategy/basic/index", "autoapi/neural_compressor/strategy/bayesian/index", "autoapi/neural_compressor/strategy/conservative/index", "autoapi/neural_compressor/strategy/exhaustive/index", "autoapi/neural_compressor/strategy/hawq_v2/index", "autoapi/neural_compressor/strategy/index", "autoapi/neural_compressor/strategy/mse/index", "autoapi/neural_compressor/strategy/mse_v2/index", "autoapi/neural_compressor/strategy/random/index", "autoapi/neural_compressor/strategy/strategy/index", "autoapi/neural_compressor/strategy/utils/constant/index", "autoapi/neural_compressor/strategy/utils/index", "autoapi/neural_compressor/strategy/utils/tuning_sampler/index", "autoapi/neural_compressor/strategy/utils/tuning_space/index", "autoapi/neural_compressor/strategy/utils/tuning_structs/index", "autoapi/neural_compressor/strategy/utils/utility/index", "autoapi/neural_compressor/template/api_doc_example/index", "autoapi/neural_compressor/tensorflow/algorithms/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/index", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index", 
"autoapi/neural_compressor/tensorflow/index", "autoapi/neural_compressor/tensorflow/keras/index", "autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index", "autoapi/neural_compressor/tensorflow/keras/layers/dense/index", "autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index", "autoapi/neural_compressor/tensorflow/keras/layers/index", "autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index", "autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index", "autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index", "autoapi/neural_compressor/tensorflow/keras/quantization/config/index", "autoapi/neural_compressor/tensorflow/keras/quantization/index", "autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index", "autoapi/neural_compressor/tensorflow/quantization/autotune/index", "autoapi/neural_compressor/tensorflow/quantization/config/index", "autoapi/neural_compressor/tensorflow/quantization/index", "autoapi/neural_compressor/tensorflow/quantization/quantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index", 
"autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index", "autoapi/neural_compressor/tensorflow/quantization/utils/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index", 
"autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index", "autoapi/neural_compressor/tensorflow/quantization/utils/utility/index", "autoapi/neural_compressor/tensorflow/utils/constants/index", "autoapi/neural_compressor/tensorflow/utils/data/index", "autoapi/neural_compressor/tensorflow/utils/index", "autoapi/neural_compressor/tensorflow/utils/model/index", "autoapi/neural_compressor/tensorflow/utils/model_wrappers/index", "autoapi/neural_compressor/tensorflow/utils/utility/index", "autoapi/neural_compressor/torch/algorithms/base_algorithm/index", "autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index", "autoapi/neural_compressor/torch/algorithms/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/load/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index", "autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index", "autoapi/neural_compressor/torch/algorithms/mixed_precision/index", "autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index", "autoapi/neural_compressor/torch/algorithms/mx_quant/index", "autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index", "autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index", 
"autoapi/neural_compressor/torch/algorithms/pt2e_quant/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index", "autoapi/neural_compressor/torch/algorithms/static_quant/index", "autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index", "autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index", "autoapi/neural_compressor/torch/algorithms/static_quant/utility/index", "autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index", "autoapi/neural_compressor/torch/algorithms/weight_only/awq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index", "autoapi/neural_compressor/torch/algorithms/weight_only/index", "autoapi/neural_compressor/torch/algorithms/weight_only/modules/index", "autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index", "autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index", "autoapi/neural_compressor/torch/algorithms/weight_only/teq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/utility/index", "autoapi/neural_compressor/torch/export/index", "autoapi/neural_compressor/torch/export/pt2e_export/index", "autoapi/neural_compressor/torch/index", "autoapi/neural_compressor/torch/quantization/algorithm_entry/index", "autoapi/neural_compressor/torch/quantization/autotune/index", "autoapi/neural_compressor/torch/quantization/config/index", "autoapi/neural_compressor/torch/quantization/index", "autoapi/neural_compressor/torch/quantization/load_entry/index", "autoapi/neural_compressor/torch/quantization/quantize/index", "autoapi/neural_compressor/torch/utils/auto_accelerator/index", "autoapi/neural_compressor/torch/utils/bit_packer/index", "autoapi/neural_compressor/torch/utils/constants/index", "autoapi/neural_compressor/torch/utils/environ/index", "autoapi/neural_compressor/torch/utils/index", "autoapi/neural_compressor/torch/utils/utility/index", "autoapi/neural_compressor/training/index", "autoapi/neural_compressor/transformers/quantization/utils/index", "autoapi/neural_compressor/transformers/utils/index", "autoapi/neural_compressor/transformers/utils/quantization_config/index", "autoapi/neural_compressor/utils/collect_layer_histogram/index", "autoapi/neural_compressor/utils/constant/index", "autoapi/neural_compressor/utils/create_obj_from_config/index", "autoapi/neural_compressor/utils/export/index", "autoapi/neural_compressor/utils/export/qlinear2qdq/index", "autoapi/neural_compressor/utils/export/tf2onnx/index", "autoapi/neural_compressor/utils/export/torch2onnx/index", "autoapi/neural_compressor/utils/index", "autoapi/neural_compressor/utils/kl_divergence/index", 
"autoapi/neural_compressor/utils/load_huggingface/index", "autoapi/neural_compressor/utils/logger/index", "autoapi/neural_compressor/utils/options/index", "autoapi/neural_compressor/utils/pytorch/index", "autoapi/neural_compressor/utils/utility/index", "autoapi/neural_compressor/utils/weights_details/index", "autoapi/neural_compressor/version/index", "docs/build_docs/source/index", "docs/source/2x_user_guide", "docs/source/3x/PT_DynamicQuant", "docs/source/3x/PT_FP8Quant", "docs/source/3x/PT_MXQuant", "docs/source/3x/PT_MixedPrecision", "docs/source/3x/PT_SmoothQuant", "docs/source/3x/PT_StaticQuant", "docs/source/3x/PT_WeightOnlyQuant", "docs/source/3x/PyTorch", "docs/source/3x/TF_Quant", "docs/source/3x/TF_SQ", "docs/source/3x/TensorFlow", "docs/source/3x/autotune", "docs/source/3x/benchmark", "docs/source/3x/client_quant", "docs/source/3x/design", "docs/source/3x/gaudi_version_map", "docs/source/3x/llm_recipes", "docs/source/3x/quantization", "docs/source/3x/transformers_like_api", "docs/source/CODE_OF_CONDUCT", "docs/source/CONTRIBUTING", "docs/source/FX", "docs/source/SECURITY", "docs/source/Welcome", "docs/source/adaptor", "docs/source/add_new_adaptor", "docs/source/add_new_data_type", "docs/source/api-doc/adaptor", "docs/source/api-doc/adaptor/onnxrt", "docs/source/api-doc/adaptor/torch_utils", "docs/source/api-doc/api_2", "docs/source/api-doc/api_3", "docs/source/api-doc/api_doc_example", "docs/source/api-doc/apis", "docs/source/api-doc/benchmark", "docs/source/api-doc/compression", "docs/source/api-doc/config", "docs/source/api-doc/mix_precision", "docs/source/api-doc/model", "docs/source/api-doc/objective", "docs/source/api-doc/quantization", "docs/source/api-doc/strategy", "docs/source/api-doc/tf_quantization_autotune", "docs/source/api-doc/tf_quantization_common", "docs/source/api-doc/tf_quantization_config", "docs/source/api-doc/torch_quantization_autotune", "docs/source/api-doc/torch_quantization_common", "docs/source/api-doc/torch_quantization_config", "docs/source/api-doc/training", "docs/source/benchmark", "docs/source/calibration", "docs/source/coding_style", "docs/source/dataloader", "docs/source/design", "docs/source/distillation_quantization", "docs/source/distributed", "docs/source/examples_readme", "docs/source/export", "docs/source/faq", "docs/source/framework_yaml", "docs/source/get_started", "docs/source/incompatible_changes", "docs/source/infrastructure", "docs/source/installation_guide", "docs/source/legal_information", "docs/source/llm_recipes", "docs/source/metric", "docs/source/migration", "docs/source/mixed_precision", "docs/source/model", "docs/source/mx_quantization", "docs/source/objective", "docs/source/orchestration", "docs/source/pruning", "docs/source/publication_list", "docs/source/quantization", "docs/source/quantization_layer_wise", "docs/source/quantization_mixed_precision", "docs/source/quantization_weight_only", "docs/source/releases_info", "docs/source/sigopt_strategy", "docs/source/smooth_quant", "docs/source/transform", "docs/source/tuning_strategies", "docs/source/validated_model_list", "index"], "envversion": {"sphinx": 61, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2}, "filenames": ["autoapi/neural_compressor/adaptor/mxnet_utils/index.rst", "autoapi/neural_compressor/adaptor/mxnet_utils/util/index.rst", 
"autoapi/neural_compressor/adaptor/ox_utils/calibration/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/calibrator/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/split/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/quantizer/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/util/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/weight_only/index.rst", "autoapi/neural_compressor/adaptor/tensorflow/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index.rst", 
"autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index.rst", 
"autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_util/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index.rst", 
"autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/util/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/util/index.rst", "autoapi/neural_compressor/algorithm/algorithm/index.rst", "autoapi/neural_compressor/algorithm/fast_bias_correction/index.rst", "autoapi/neural_compressor/algorithm/index.rst", "autoapi/neural_compressor/algorithm/smooth_quant/index.rst", "autoapi/neural_compressor/algorithm/weight_correction/index.rst", "autoapi/neural_compressor/benchmark/index.rst", "autoapi/neural_compressor/common/base_config/index.rst", "autoapi/neural_compressor/common/base_tuning/index.rst", "autoapi/neural_compressor/common/benchmark/index.rst", "autoapi/neural_compressor/common/index.rst", "autoapi/neural_compressor/common/tuning_param/index.rst", "autoapi/neural_compressor/common/utils/constants/index.rst", "autoapi/neural_compressor/common/utils/index.rst", "autoapi/neural_compressor/common/utils/logger/index.rst", "autoapi/neural_compressor/common/utils/save_load/index.rst", "autoapi/neural_compressor/common/utils/utility/index.rst", "autoapi/neural_compressor/compression/callbacks/index.rst", "autoapi/neural_compressor/compression/distillation/criterions/index.rst", "autoapi/neural_compressor/compression/distillation/index.rst", "autoapi/neural_compressor/compression/distillation/optimizers/index.rst", "autoapi/neural_compressor/compression/distillation/utility/index.rst", "autoapi/neural_compressor/compression/hpo/index.rst", "autoapi/neural_compressor/compression/hpo/sa_optimizer/index.rst", "autoapi/neural_compressor/compression/pruner/criteria/index.rst", "autoapi/neural_compressor/compression/pruner/index.rst", "autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index.rst", "autoapi/neural_compressor/compression/pruner/model_slim/index.rst", 
"autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index.rst", "autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/base/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/mha/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/ninm/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/nxm/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/base/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/basic/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/block_mask/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/mha/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/progressive/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index.rst", "autoapi/neural_compressor/compression/pruner/pruning/index.rst", "autoapi/neural_compressor/compression/pruner/regs/index.rst", "autoapi/neural_compressor/compression/pruner/schedulers/index.rst", "autoapi/neural_compressor/compression/pruner/tf_criteria/index.rst", "autoapi/neural_compressor/compression/pruner/utils/index.rst", "autoapi/neural_compressor/compression/pruner/wanda/index.rst", "autoapi/neural_compressor/compression/pruner/wanda/utils/index.rst", "autoapi/neural_compressor/config/index.rst", "autoapi/neural_compressor/contrib/index.rst", "autoapi/neural_compressor/contrib/strategy/index.rst", "autoapi/neural_compressor/contrib/strategy/sigopt/index.rst", "autoapi/neural_compressor/contrib/strategy/tpe/index.rst", "autoapi/neural_compressor/data/dataloaders/base_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/default_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/fetcher/index.rst", "autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/sampler/index.rst", "autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index.rst", "autoapi/neural_compressor/data/datasets/bert_dataset/index.rst", "autoapi/neural_compressor/data/datasets/coco_dataset/index.rst", "autoapi/neural_compressor/data/datasets/dataset/index.rst", "autoapi/neural_compressor/data/datasets/dummy_dataset/index.rst", "autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index.rst", "autoapi/neural_compressor/data/datasets/imagenet_dataset/index.rst", "autoapi/neural_compressor/data/datasets/index.rst", "autoapi/neural_compressor/data/datasets/style_transfer_dataset/index.rst", "autoapi/neural_compressor/data/filters/coco_filter/index.rst", "autoapi/neural_compressor/data/filters/filter/index.rst", "autoapi/neural_compressor/data/filters/index.rst", "autoapi/neural_compressor/data/index.rst", "autoapi/neural_compressor/data/transforms/imagenet_transform/index.rst", "autoapi/neural_compressor/data/transforms/index.rst", "autoapi/neural_compressor/data/transforms/postprocess/index.rst", "autoapi/neural_compressor/data/transforms/tokenization/index.rst", 
"autoapi/neural_compressor/data/transforms/transform/index.rst", "autoapi/neural_compressor/index.rst", "autoapi/neural_compressor/metric/bleu/index.rst", "autoapi/neural_compressor/metric/bleu_util/index.rst", "autoapi/neural_compressor/metric/coco_label_map/index.rst", "autoapi/neural_compressor/metric/coco_tools/index.rst", "autoapi/neural_compressor/metric/evaluate_squad/index.rst", "autoapi/neural_compressor/metric/f1/index.rst", "autoapi/neural_compressor/metric/index.rst", "autoapi/neural_compressor/metric/metric/index.rst", "autoapi/neural_compressor/mix_precision/index.rst", "autoapi/neural_compressor/model/base_model/index.rst", "autoapi/neural_compressor/model/index.rst", "autoapi/neural_compressor/model/keras_model/index.rst", "autoapi/neural_compressor/model/model/index.rst", "autoapi/neural_compressor/model/mxnet_model/index.rst", "autoapi/neural_compressor/model/nets_factory/index.rst", "autoapi/neural_compressor/model/onnx_model/index.rst", "autoapi/neural_compressor/model/tensorflow_model/index.rst", "autoapi/neural_compressor/model/torch_model/index.rst", "autoapi/neural_compressor/objective/index.rst", "autoapi/neural_compressor/profiling/index.rst", "autoapi/neural_compressor/profiling/parser/factory/index.rst", "autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index.rst", "autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index.rst", "autoapi/neural_compressor/profiling/parser/parser/index.rst", "autoapi/neural_compressor/profiling/parser/result/index.rst", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index.rst", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index.rst", "autoapi/neural_compressor/profiling/profiler/factory/index.rst", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index.rst", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index.rst", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index.rst", "autoapi/neural_compressor/profiling/profiler/profiler/index.rst", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index.rst", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index.rst", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index.rst", "autoapi/neural_compressor/quantization/index.rst", "autoapi/neural_compressor/strategy/auto/index.rst", "autoapi/neural_compressor/strategy/auto_mixed_precision/index.rst", "autoapi/neural_compressor/strategy/basic/index.rst", "autoapi/neural_compressor/strategy/bayesian/index.rst", "autoapi/neural_compressor/strategy/conservative/index.rst", "autoapi/neural_compressor/strategy/exhaustive/index.rst", "autoapi/neural_compressor/strategy/hawq_v2/index.rst", "autoapi/neural_compressor/strategy/index.rst", "autoapi/neural_compressor/strategy/mse/index.rst", "autoapi/neural_compressor/strategy/mse_v2/index.rst", "autoapi/neural_compressor/strategy/random/index.rst", "autoapi/neural_compressor/strategy/strategy/index.rst", "autoapi/neural_compressor/strategy/utils/constant/index.rst", "autoapi/neural_compressor/strategy/utils/index.rst", "autoapi/neural_compressor/strategy/utils/tuning_sampler/index.rst", "autoapi/neural_compressor/strategy/utils/tuning_space/index.rst", "autoapi/neural_compressor/strategy/utils/tuning_structs/index.rst", "autoapi/neural_compressor/strategy/utils/utility/index.rst", "autoapi/neural_compressor/template/api_doc_example/index.rst", 
"autoapi/neural_compressor/tensorflow/algorithms/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index.rst", "autoapi/neural_compressor/tensorflow/index.rst", "autoapi/neural_compressor/tensorflow/keras/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/dense/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/quantization/config/index.rst", "autoapi/neural_compressor/tensorflow/keras/quantization/index.rst", "autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index.rst", "autoapi/neural_compressor/tensorflow/quantization/autotune/index.rst", "autoapi/neural_compressor/tensorflow/quantization/config/index.rst", "autoapi/neural_compressor/tensorflow/quantization/index.rst", "autoapi/neural_compressor/tensorflow/quantization/quantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index.rst", 
"autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index.rst", 
"autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/utility/index.rst", "autoapi/neural_compressor/tensorflow/utils/constants/index.rst", "autoapi/neural_compressor/tensorflow/utils/data/index.rst", "autoapi/neural_compressor/tensorflow/utils/index.rst", "autoapi/neural_compressor/tensorflow/utils/model/index.rst", "autoapi/neural_compressor/tensorflow/utils/model_wrappers/index.rst", 
"autoapi/neural_compressor/tensorflow/utils/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/base_algorithm/index.rst", "autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index.rst", "autoapi/neural_compressor/torch/algorithms/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/load/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index.rst", "autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index.rst", "autoapi/neural_compressor/torch/algorithms/mixed_precision/index.rst", "autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index.rst", "autoapi/neural_compressor/torch/algorithms/mx_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index.rst", "autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/awq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/modules/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/teq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/utility/index.rst", "autoapi/neural_compressor/torch/export/index.rst", "autoapi/neural_compressor/torch/export/pt2e_export/index.rst", "autoapi/neural_compressor/torch/index.rst", "autoapi/neural_compressor/torch/quantization/algorithm_entry/index.rst", "autoapi/neural_compressor/torch/quantization/autotune/index.rst", 
"autoapi/neural_compressor/torch/quantization/config/index.rst", "autoapi/neural_compressor/torch/quantization/index.rst", "autoapi/neural_compressor/torch/quantization/load_entry/index.rst", "autoapi/neural_compressor/torch/quantization/quantize/index.rst", "autoapi/neural_compressor/torch/utils/auto_accelerator/index.rst", "autoapi/neural_compressor/torch/utils/bit_packer/index.rst", "autoapi/neural_compressor/torch/utils/constants/index.rst", "autoapi/neural_compressor/torch/utils/environ/index.rst", "autoapi/neural_compressor/torch/utils/index.rst", "autoapi/neural_compressor/torch/utils/utility/index.rst", "autoapi/neural_compressor/training/index.rst", "autoapi/neural_compressor/transformers/quantization/utils/index.rst", "autoapi/neural_compressor/transformers/utils/index.rst", "autoapi/neural_compressor/transformers/utils/quantization_config/index.rst", "autoapi/neural_compressor/utils/collect_layer_histogram/index.rst", "autoapi/neural_compressor/utils/constant/index.rst", "autoapi/neural_compressor/utils/create_obj_from_config/index.rst", "autoapi/neural_compressor/utils/export/index.rst", "autoapi/neural_compressor/utils/export/qlinear2qdq/index.rst", "autoapi/neural_compressor/utils/export/tf2onnx/index.rst", "autoapi/neural_compressor/utils/export/torch2onnx/index.rst", "autoapi/neural_compressor/utils/index.rst", "autoapi/neural_compressor/utils/kl_divergence/index.rst", "autoapi/neural_compressor/utils/load_huggingface/index.rst", "autoapi/neural_compressor/utils/logger/index.rst", "autoapi/neural_compressor/utils/options/index.rst", "autoapi/neural_compressor/utils/pytorch/index.rst", "autoapi/neural_compressor/utils/utility/index.rst", "autoapi/neural_compressor/utils/weights_details/index.rst", "autoapi/neural_compressor/version/index.rst", "docs/build_docs/source/index.rst", "docs/source/2x_user_guide.md", "docs/source/3x/PT_DynamicQuant.md", "docs/source/3x/PT_FP8Quant.md", "docs/source/3x/PT_MXQuant.md", "docs/source/3x/PT_MixedPrecision.md", "docs/source/3x/PT_SmoothQuant.md", "docs/source/3x/PT_StaticQuant.md", "docs/source/3x/PT_WeightOnlyQuant.md", "docs/source/3x/PyTorch.md", "docs/source/3x/TF_Quant.md", "docs/source/3x/TF_SQ.md", "docs/source/3x/TensorFlow.md", "docs/source/3x/autotune.md", "docs/source/3x/benchmark.md", "docs/source/3x/client_quant.md", "docs/source/3x/design.md", "docs/source/3x/gaudi_version_map.md", "docs/source/3x/llm_recipes.md", "docs/source/3x/quantization.md", "docs/source/3x/transformers_like_api.md", "docs/source/CODE_OF_CONDUCT.md", "docs/source/CONTRIBUTING.md", "docs/source/FX.md", "docs/source/SECURITY.md", "docs/source/Welcome.md", "docs/source/adaptor.md", "docs/source/add_new_adaptor.md", "docs/source/add_new_data_type.md", "docs/source/api-doc/adaptor.rst", "docs/source/api-doc/adaptor/onnxrt.rst", "docs/source/api-doc/adaptor/torch_utils.rst", "docs/source/api-doc/api_2.rst", "docs/source/api-doc/api_3.rst", "docs/source/api-doc/api_doc_example.rst", "docs/source/api-doc/apis.rst", "docs/source/api-doc/benchmark.rst", "docs/source/api-doc/compression.rst", "docs/source/api-doc/config.rst", "docs/source/api-doc/mix_precision.rst", "docs/source/api-doc/model.rst", "docs/source/api-doc/objective.rst", "docs/source/api-doc/quantization.rst", "docs/source/api-doc/strategy.rst", "docs/source/api-doc/tf_quantization_autotune.rst", "docs/source/api-doc/tf_quantization_common.rst", "docs/source/api-doc/tf_quantization_config.rst", "docs/source/api-doc/torch_quantization_autotune.rst", 
"docs/source/api-doc/torch_quantization_common.rst", "docs/source/api-doc/torch_quantization_config.rst", "docs/source/api-doc/training.rst", "docs/source/benchmark.md", "docs/source/calibration.md", "docs/source/coding_style.md", "docs/source/dataloader.md", "docs/source/design.md", "docs/source/distillation_quantization.md", "docs/source/distributed.md", "docs/source/examples_readme.md", "docs/source/export.md", "docs/source/faq.md", "docs/source/framework_yaml.md", "docs/source/get_started.md", "docs/source/incompatible_changes.md", "docs/source/infrastructure.md", "docs/source/installation_guide.md", "docs/source/legal_information.md", "docs/source/llm_recipes.md", "docs/source/metric.md", "docs/source/migration.md", "docs/source/mixed_precision.md", "docs/source/model.md", "docs/source/mx_quantization.md", "docs/source/objective.md", "docs/source/orchestration.md", "docs/source/pruning.md", "docs/source/publication_list.md", "docs/source/quantization.md", "docs/source/quantization_layer_wise.md", "docs/source/quantization_mixed_precision.md", "docs/source/quantization_weight_only.md", "docs/source/releases_info.md", "docs/source/sigopt_strategy.md", "docs/source/smooth_quant.md", "docs/source/transform.md", "docs/source/tuning_strategies.md", "docs/source/validated_model_list.md", "index.rst"], "indexentries": {"_epoch_ran (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks._epoch_ran", false]], "acceleratorregistry (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.AcceleratorRegistry", false]], "accuracy (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.Accuracy", false]], "accuracy (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Accuracy", false]], "accuracycriterion (class in neural_compressor.config)": [[195, "neural_compressor.config.AccuracyCriterion", false]], "acq_max() (in module neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.acq_max", false]], "activationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.ActivationOperator", false]], "add_port_to_name() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.add_port_to_name", false]], "algorithm (class in neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.Algorithm", false]], "algorithm_registry() (in module neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.algorithm_registry", false]], "algorithms (class in neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.ALGORITHMS", false]], "algorithmscheduler (class in neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.AlgorithmScheduler", false]], "alias_param() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.alias_param", false]], "alignimagechanneltransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.AlignImageChannelTransform", false]], "alpha (neural_compressor.compression.pruner.regs.grouplasso attribute)": [[189, "neural_compressor.compression.pruner.regs.GroupLasso.alpha", false]], 
"amp_convert() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.amp_convert", false]], "append_attr() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.append_attr", false]], "apply_awq_clip() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.apply_awq_clip", false]], "apply_awq_scale() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.apply_awq_scale", false]], "apply_inlining() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.apply_inlining", false]], "apply_inlining() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.apply_inlining", false]], "apply_single_pattern_pair() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.apply_single_pattern_pair", false]], "are_shapes_equal() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.are_shapes_equal", false]], "argmaxoperator (class in neural_compressor.adaptor.ox_utils.operators.argmax)": [[6, "neural_compressor.adaptor.ox_utils.operators.argmax.ArgMaxOperator", false]], "assert_error() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.assert_error", false]], "attentionoperator (class in neural_compressor.adaptor.ox_utils.operators.attention)": [[7, "neural_compressor.adaptor.ox_utils.operators.attention.AttentionOperator", false]], "attr1 (neural_compressor.template.api_doc_example.exampleclass attribute)": [[281, "neural_compressor.template.api_doc_example.ExampleClass.attr1", false]], "attr2 (neural_compressor.template.api_doc_example.exampleclass attribute)": [[281, "neural_compressor.template.api_doc_example.ExampleClass.attr2", false]], "attr5 (neural_compressor.template.api_doc_example.exampleclass attribute)": [[281, "neural_compressor.template.api_doc_example.ExampleClass.attr5", false]], "attribute1 (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.attribute1", false]], "attribute_to_kwarg() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.attribute_to_kwarg", false]], "auto_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.Auto_Accelerator", false]], "auto_copy() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.auto_copy", false]], "auto_detect_accelerator() (in module neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.auto_detect_accelerator", false]], "autoalpha (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.AutoAlpha", false]], "automixedprecisiontunestrategy (class in neural_compressor.strategy.auto_mixed_precision)": [[264, "neural_compressor.strategy.auto_mixed_precision.AutoMixedPrecisionTuneStrategy", false]], "autoround_quantize_entry() (in module 
neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.autoround_quantize_entry", false]], "autoroundconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.AutoRoundConfig", false]], "autoroundconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.AutoRoundConfig", false]], "autoroundquantizer (class in neural_compressor.torch.algorithms.weight_only.autoround)": [[418, "neural_compressor.torch.algorithms.weight_only.autoround.AutoRoundQuantizer", false]], "autotune() (in module neural_compressor.tensorflow.quantization.autotune)": [[302, "neural_compressor.tensorflow.quantization.autotune.autotune", false]], "autotune() (in module neural_compressor.torch.quantization.autotune)": [[438, "neural_compressor.torch.quantization.autotune.autotune", false]], "autotunestrategy (class in neural_compressor.strategy.auto)": [[263, "neural_compressor.strategy.auto.AutoTuneStrategy", false]], "awq_quantize() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.awq_quantize", false]], "awq_quantize_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.awq_quantize_entry", false]], "awqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.AWQConfig", false]], "awqconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.AwqConfig", false]], "awqquantizer (class in neural_compressor.torch.algorithms.weight_only.awq)": [[419, "neural_compressor.torch.algorithms.weight_only.awq.AWQQuantizer", false]], "axis (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.axis", false]], "basecallbacks (class in neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.BaseCallbacks", false]], "baseconfig (class in neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.BaseConfig", false]], "basedataloader (class in neural_compressor.data.dataloaders.base_dataloader)": [[200, "neural_compressor.data.dataloaders.base_dataloader.BaseDataLoader", false]], "basedataloader (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.BaseDataLoader", false]], "basemetric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.BaseMetric", false]], "basemodel (class in neural_compressor.model.base_model)": [[236, "neural_compressor.model.base_model.BaseModel", false]], "basemodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.BaseModel", false]], "basepattern (class in neural_compressor.compression.pruner.patterns.base)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern", false]], "basepruner (class in neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner", false]], "basepruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning", false]], "basereg (class in 
neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.BaseReg", false]], "basetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.BaseTransform", false]], "basicpruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning", false]], "basictokenizer (class in neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.BasicTokenizer", false]], "basictunestrategy (class in neural_compressor.strategy.basic)": [[265, "neural_compressor.strategy.basic.BasicTuneStrategy", false]], "batchnormalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.norm)": [[20, "neural_compressor.adaptor.ox_utils.operators.norm.BatchNormalizationOperator", false]], "batchsampler (class in neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.BatchSampler", false]], "batchsampler (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.BatchSampler", false]], "bayesianoptimization (class in neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.BayesianOptimization", false]], "bayesiantunestrategy (class in neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.BayesianTuneStrategy", false]], "benchmark() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.benchmark", false]], "benchmark_with_raw_cmd() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.benchmark_with_raw_cmd", false]], "benchmarkconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.BenchmarkConfig", false]], "best_model (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks.best_model", false]], "best_score (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks.best_score", false]], "bf16convert (class in neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert)": [[35, "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert.BF16Convert", false]], "bf16convert (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert)": [[307, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert.BF16Convert", false]], "bf16modulewrapper (class in neural_compressor.adaptor.torch_utils.bf16_convert)": [[134, "neural_compressor.adaptor.torch_utils.bf16_convert.BF16ModuleWrapper", false]], "biascorrection (class in neural_compressor.adaptor.tf_utils.transform_graph.bias_correction)": [[128, "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction.BiasCorrection", false]], "biascorrection (class in neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction)": [[380, "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction.BiasCorrection", false]], "bilinearimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.BilinearImagenetTransform", false]], "binarydirect8bitoperator (class in neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, 
"neural_compressor.adaptor.ox_utils.operators.binary_op.BinaryDirect8BitOperator", false]], "binaryoperator (class in neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, "neural_compressor.adaptor.ox_utils.operators.binary_op.BinaryOperator", false]], "bleu (class in neural_compressor.metric.bleu)": [[227, "neural_compressor.metric.bleu.BLEU", false]], "bleu_tokenize() (in module neural_compressor.metric.bleu)": [[227, "neural_compressor.metric.bleu.bleu_tokenize", false]], "block_size (neural_compressor.compression.pruner.patterns.nxm.keraspatternnxm attribute)": [[179, "neural_compressor.compression.pruner.patterns.nxm.KerasPatternNxM.block_size", false]], "block_size (neural_compressor.compression.pruner.patterns.nxm.pytorchpatternnxm attribute)": [[179, "neural_compressor.compression.pruner.patterns.nxm.PytorchPatternNxM.block_size", false]], "blockfallbacktuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.BlockFallbackTuningSampler", false]], "blockmaskcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.BlockMaskCriterion", false]], "build_captured_dataloader() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.build_captured_dataloader", false]], "build_slave_faker_model() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.build_slave_faker_model", false]], "bypass_reshape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.bypass_reshape", false]], "bypass_reshape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.bypass_reshape", false]], "bypass_reshape() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.bypass_reshape", false]], "bypass_reshape() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.bypass_reshape", false]], "cal_scale() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.cal_scale", false]], "calculate_md5() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.calculate_md5", false]], "calculate_mse() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.calculate_mse", false]], "calculate_quant_min_max() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.calculate_quant_min_max", false]], "calculate_scale_zp() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.calculate_scale_zp", false]], "calib_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.calib_model", false]], "calib_registry() (in module neural_compressor.adaptor.ox_utils.calibrator)": [[3, 
"neural_compressor.adaptor.ox_utils.calibrator.calib_registry", false]], "calibcollector (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.CalibCollector", false]], "calibdata (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.CalibData", false]], "calibration (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.Calibration", false]], "calibration() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.calibration", false]], "calibratorbase (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.CalibratorBase", false]], "call_counter() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.call_counter", false]], "call_one() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.call_one", false]], "callbacks (class in neural_compressor.training)": [[449, "neural_compressor.training.CallBacks", false]], "captureoutputtofile (class in neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.CaptureOutputToFile", false]], "captureoutputtofile (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.CaptureOutputToFile", false]], "cast_tensor() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.cast_tensor", false]], "castonnxtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CastONNXTransform", false]], "castpytorchtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CastPyTorchTransform", false]], "casttftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CastTFTransform", false]], "centercroptftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CenterCropTFTransform", false]], "centercroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CenterCropTransform", false]], "cfg_to_qconfig() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.cfg_to_qconfig", false]], "cfg_to_qconfig() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.cfg_to_qconfig", false]], "check_cfg_and_qconfig() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.check_cfg_and_qconfig", false]], "check_cfg_and_qconfig() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.check_cfg_and_qconfig", false]], "check_cfg_and_qconfig() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.check_cfg_and_qconfig", false]], "check_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.check_config", false]], "check_dataloader() (in module neural_compressor.data.dataloaders.dataloader)": 
[[201, "neural_compressor.data.dataloaders.dataloader.check_dataloader", false]], "check_integrity() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.check_integrity", false]], "check_key_exist() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.check_key_exist", false]], "check_key_validity() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.check_key_validity", false]], "check_model() (in module neural_compressor.utils.export.qlinear2qdq)": [[457, "neural_compressor.utils.export.qlinear2qdq.check_model", false]], "check_mx_version() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.check_mx_version", false]], "checkpoint_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.checkpoint_session", false]], "checkpoint_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.checkpoint_session", false]], "cifar10 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.CIFAR10", false]], "cifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.CIFAR100", false]], "classifierheadsearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher", false]], "classifierheadsearchertf (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF", false]], "classregister (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.ClassRegister", false]], "clean_module_weight() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.clean_module_weight", false]], "cocoevalwrapper (class in neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.COCOEvalWrapper", false]], "cocomapv2 (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.COCOmAPv2", false]], "coconpy (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.COCONpy", false]], "cocoraw (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.COCORaw", false]], "cocorecorddataset (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.COCORecordDataset", false]], "cocowrapper (class in neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.COCOWrapper", false]], "collate_preds() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.collate_preds", false]], "collate_tf_preds() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.collate_tf_preds", false]], "collate_tf_preds() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.collate_tf_preds", false]], "collate_torch_preds() (in module neural_compressor.adaptor.torch_utils.util)": [[145, 
"neural_compressor.adaptor.torch_utils.util.collate_torch_preds", false]], "collect_layer_inputs() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.collect_layer_inputs", false]], "collect_weight_info() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.collect_weight_info", false]], "collectorbase (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.CollectorBase", false]], "collecttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CollectTransform", false]], "combine_capabilities() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.combine_capabilities", false]], "combine_histogram() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.combine_histogram", false]], "combine_histogram() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.combine_histogram", false]], "compare_label (neural_compressor.metric.metric.mae attribute)": [[234, "neural_compressor.metric.metric.MAE.compare_label", false]], "compare_label (neural_compressor.metric.metric.mse attribute)": [[234, "neural_compressor.metric.metric.MSE.compare_label", false]], "compare_objects() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.compare_objects", false]], "compare_weights() (in module neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.compare_weights", false]], "composableconfig (class in neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.ComposableConfig", false]], "composetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ComposeTransform", false]], "compressionmanager (class in neural_compressor.training)": [[449, "neural_compressor.training.CompressionManager", false]], "compute_bleu() (in module neural_compressor.metric.bleu_util)": [[228, "neural_compressor.metric.bleu_util.compute_bleu", false]], "compute_const_folding_using_tf() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.compute_const_folding_using_tf", false]], "compute_sparsity() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.compute_sparsity", false]], "concatoperator (class in neural_compressor.adaptor.ox_utils.operators.concat)": [[9, "neural_compressor.adaptor.ox_utils.operators.concat.ConcatOperator", false]], "config (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.config", false]], "config (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.config", false]], "config (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.config", false]], "config (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.config", false]], "config 
(neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.config", false]], "config (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.config", false]], "config (neural_compressor.compression.pruner.schedulers.pruningscheduler attribute)": [[190, "neural_compressor.compression.pruner.schedulers.PruningScheduler.config", false]], "config_file_path (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.config_file_path", false]], "config_file_path (neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.config_file_path", false]], "config_file_path (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.config_file_path", false]], "config_instance() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.config_instance", false]], "config_list (neural_compressor.common.base_config.composableconfig attribute)": [[152, "neural_compressor.common.base_config.ComposableConfig.config_list", false]], "config_list (neural_compressor.common.base_tuning.configset attribute)": [[153, "neural_compressor.common.base_tuning.ConfigSet.config_list", false]], "config_quantizable_layers() (in module neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer)": [[103, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer.config_quantizable_layers", false]], "configloader (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.ConfigLoader", false]], "configregistry (class in neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.ConfigRegistry", false]], "configset (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.ConfigSet", false]], "conservativetunestrategy (class in neural_compressor.strategy.conservative)": [[267, "neural_compressor.strategy.conservative.ConservativeTuneStrategy", false]], "construct_function_from_graph_def() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.construct_function_from_graph_def", false]], "construct_function_from_graph_def() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.construct_function_from_graph_def", false]], "convert() (in module neural_compressor.adaptor.torch_utils.bf16_convert)": [[134, "neural_compressor.adaptor.torch_utils.bf16_convert.Convert", false]], "convert() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.convert", false]], "convert_by_vocab() (in module neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.convert_by_vocab", false]], "convert_examples_to_features() (in module neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.convert_examples_to_features", false]], "convert_examples_to_features() (in module neural_compressor.data.transforms.transform)": [[225, 
"neural_compressor.data.transforms.transform.convert_examples_to_features", false]], "convert_tensorflow_tensor_to_onnx() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.convert_tensorflow_tensor_to_onnx", false]], "convert_to_unicode() (in module neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.convert_to_unicode", false]], "convertaddtobiasaddoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd)": [[38, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd.ConvertAddToBiasAddOptimizer", false]], "convertaddtobiasaddoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd)": [[310, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd.ConvertAddToBiasAddOptimizer", false]], "convertlayoutoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout)": [[39, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout.ConvertLayoutOptimizer", false]], "convertlayoutoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout)": [[311, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout.ConvertLayoutOptimizer", false]], "convertleakyreluoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu)": [[40, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu.ConvertLeakyReluOptimizer", false]], "convertleakyreluoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu)": [[312, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu.ConvertLeakyReluOptimizer", false]], "convertnantorandom (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random)": [[41, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random.ConvertNanToRandom", false]], "convertnantorandom (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random)": [[313, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random.ConvertNanToRandom", false]], "convertplaceholdertoconst (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const)": [[42, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const.ConvertPlaceholderToConst", false]], "convertplaceholdertoconst (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const)": [[314, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const.ConvertPlaceholderToConst", false]], "convoperator (class in neural_compressor.adaptor.ox_utils.operators.conv)": [[10, "neural_compressor.adaptor.ox_utils.operators.conv.ConvOperator", false]], "cpu_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.CPU_Accelerator", false]], "cpuinfo (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.CpuInfo", false]], "cpuinfo (class in neural_compressor.tensorflow.utils.utility)": [[391, 
"neural_compressor.tensorflow.utils.utility.CpuInfo", false]], "cpuinfo (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.CpuInfo", false]], "create_data_example() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.create_data_example", false]], "create_dataloader() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_dataloader", false]], "create_dataset() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_dataset", false]], "create_eval_func() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_eval_func", false]], "create_onnx_config() (in module neural_compressor.profiling.profiler.onnxrt_profiler.utils)": [[257, "neural_compressor.profiling.profiler.onnxrt_profiler.utils.create_onnx_config", false]], "create_quant_spec_from_config() (in module neural_compressor.torch.algorithms.pt2e_quant.utility)": [[409, "neural_compressor.torch.algorithms.pt2e_quant.utility.create_quant_spec_from_config", false]], "create_tf_config() (in module neural_compressor.profiling.profiler.tensorflow_profiler.utils)": [[261, "neural_compressor.profiling.profiler.tensorflow_profiler.utils.create_tf_config", false]], "create_train_func() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_train_func", false]], "create_xiq_quantizer_from_pt2e_config() (in module neural_compressor.torch.algorithms.pt2e_quant.utility)": [[409, "neural_compressor.torch.algorithms.pt2e_quant.utility.create_xiq_quantizer_from_pt2e_config", false]], "criterion (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.criterion", false]], "criterion (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.criterion", false]], "criterion (neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.criterion", false]], "criterion (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.criterion", false]], "criterion_registry() (in module neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.criterion_registry", false]], "criterions (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.Criterions", false]], "cropresizetftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CropResizeTFTransform", false]], "cropresizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CropResizeTransform", false]], "croptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CropToBoundingBox", false]], "cuda_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, 
"neural_compressor.torch.utils.auto_accelerator.CUDA_Accelerator", false]], "current_pattern (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.current_pattern", false]], "current_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.current_sparsity_ratio", false]], "current_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.current_sparsity_ratio", false]], "current_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.current_sparsity_ratio", false]], "dataiterloader (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.DataIterLoader", false]], "dataloader (class in neural_compressor.data.dataloaders.dataloader)": [[201, "neural_compressor.data.dataloaders.dataloader.DataLoader", false]], "dataloaderwrap (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.DataLoaderWrap", false]], "dataset (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.Dataset", false]], "dataset (neural_compressor.metric.coco_tools.cocowrapper attribute)": [[230, "neural_compressor.metric.coco_tools.COCOWrapper.dataset", false]], "dataset_registry() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.dataset_registry", false]], "datasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.Datasets", false]], "debug() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.debug", false]], "deep_get() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.deep_get", false]], "deep_get() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.deep_get", false]], "deep_set() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.deep_set", false]], "default_collate() (in module neural_compressor.data.dataloaders.default_dataloader)": [[202, "neural_compressor.data.dataloaders.default_dataloader.default_collate", false]], "default_collate() (in module neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.default_collate", false]], "defaultdataloader (class in neural_compressor.data.dataloaders.default_dataloader)": [[202, "neural_compressor.data.dataloaders.default_dataloader.DefaultDataLoader", false]], "delete_assign() (in module neural_compressor.profiling.profiler.tensorflow_profiler.utils)": [[261, "neural_compressor.profiling.profiler.tensorflow_profiler.utils.delete_assign", false]], "dequantize() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.Dequantize", false]], "dequantize_data() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.dequantize_data", false]], "dequantize_data_with_scale_zero() (in module neural_compressor.adaptor.ox_utils.util)": [[30, 
"neural_compressor.adaptor.ox_utils.util.dequantize_data_with_scale_zero", false]], "dequantize_weight() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dequantize_weight", false]], "dequantizecastoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer)": [[36, "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer.DequantizeCastOptimizer", false]], "dequantizecastoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer)": [[308, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer.DequantizeCastOptimizer", false]], "detect_processor_type_based_on_hw() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.detect_processor_type_based_on_hw", false]], "detection_type (neural_compressor.metric.coco_tools.cocowrapper attribute)": [[230, "neural_compressor.metric.coco_tools.COCOWrapper.detection_type", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompression attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression.device", false]], "device_synchronize() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.device_synchronize", false]], "dilatedcontraction (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction)": [[43, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction.DilatedContraction", false]], "dilatedcontraction (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction)": [[315, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction.DilatedContraction", false]], "direct8bitoperator (class in neural_compressor.adaptor.ox_utils.operators.direct_q8)": [[11, "neural_compressor.adaptor.ox_utils.operators.direct_q8.Direct8BitOperator", false]], "disable_random() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.disable_random", false]], "disable_random() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.disable_random", false]], "distillationcallbacks (class in 
neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks", false]], "distillationconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.DistillationConfig", false]], "distribute_calib_tensors() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.distribute_calib_tensors", false]], "dotdict (class in neural_compressor.config)": [[195, "neural_compressor.config.DotDict", false]], "dotdict (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.DotDict", false]], "dowload_hf_model() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.dowload_hf_model", false]], "dowload_hf_model() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.dowload_hf_model", false]], "dowload_hf_model() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.dowload_hf_model", false]], "download_url() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.download_url", false]], "dtype_to_name() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.dtype_to_name", false]], "dummydataset (class in neural_compressor.data.datasets.dummy_dataset)": [[212, "neural_compressor.data.datasets.dummy_dataset.DummyDataset", false]], "dummydataset (class in neural_compressor.data.datasets.dummy_dataset_v2)": [[213, "neural_compressor.data.datasets.dummy_dataset_v2.DummyDataset", false]], "dummydataset (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.DummyDataset", false]], "dummydatasetv2 (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.DummyDatasetV2", false]], "dump_class_attrs() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_class_attrs", false]], "dump_data_to_local() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_data_to_local", false]], "dump_elapsed_time() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.dump_elapsed_time", false]], "dump_elapsed_time() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.dump_elapsed_time", false]], "dump_elapsed_time() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_elapsed_time", false]], "dump_model_op_stats() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.dump_model_op_stats", false]], "dump_model_op_stats() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.dump_model_op_stats", false]], "dump_model_op_stats() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.dump_model_op_stats", false]], "dump_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.dump_numa_info", false]], "dump_table() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_table", false]], "dump_table_to_csv() (in module 
neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_table_to_csv", false]], "dynamic_quant_export() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.dynamic_quant_export", false]], "dynamicquantconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.DynamicQuantConfig", false]], "elemformat (class in neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.ElemFormat", false]], "embedlayernormalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.embed_layernorm)": [[12, "neural_compressor.adaptor.ox_utils.operators.embed_layernorm.EmbedLayerNormalizationOperator", false]], "end_step (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.end_step", false]], "end_step (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.end_step", false]], "end_step (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.end_step", false]], "enough_memo_store_scale() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.enough_memo_store_scale", false]], "ensure_list() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.ensure_list", false]], "equal_dicts() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.equal_dicts", false]], "error() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.error", false]], "estimator_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.estimator_session", false]], "estimator_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.estimator_session", false]], "eval_frequency (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks.eval_frequency", false]], "evaluate() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.evaluate", false]], "evaluate() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.evaluate", false]], "evaluationfuncwrapper (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.EvaluationFuncWrapper", false]], "evaluator (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.Evaluator", false]], "exact_match_score() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.exact_match_score", false]], "exampleclass (class in neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.ExampleClass", false]], "exhaustivetunestrategy (class in neural_compressor.strategy.exhaustive)": [[268, "neural_compressor.strategy.exhaustive.ExhaustiveTuneStrategy", false]], "expanddimsoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer)": [[45, 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer.ExpandDimsOptimizer", false]], "expanddimsoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer)": [[317, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer.ExpandDimsOptimizer", false]], "export() (in module neural_compressor.torch.export.pt2e_export)": [[435, "neural_compressor.torch.export.pt2e_export.export", false]], "export_compressed_model() (in module neural_compressor.utils.load_huggingface)": [[462, "neural_compressor.utils.load_huggingface.export_compressed_model", false]], "export_model_for_pt2e_quant() (in module neural_compressor.torch.export.pt2e_export)": [[435, "neural_compressor.torch.export.pt2e_export.export_model_for_pt2e_quant", false]], "exportconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.ExportConfig", false]], "exportsingleimagedetectionboxestococo() (in module neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.ExportSingleImageDetectionBoxesToCoco", false]], "exportsingleimagedetectionmaskstococo() (in module neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.ExportSingleImageDetectionMasksToCoco", false]], "exportsingleimagegroundtruthtococo() (in module neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.ExportSingleImageGroundtruthToCoco", false]], "extract_data_type() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.extract_data_type", false]], "f1 (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.F1", false]], "f1_score() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.f1_score", false]], "f1_score() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.f1_score", false]], "fakeaffinetensorquantfunction (class in neural_compressor.adaptor.torch_utils.model_wrapper)": [[142, "neural_compressor.adaptor.torch_utils.model_wrapper.FakeAffineTensorQuantFunction", false]], "fakeaffinetensorquantfunction (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.FakeAffineTensorQuantFunction", false]], "fakequantize (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize)": [[98, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize.FakeQuantize", false]], "fakequantizebase (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize)": [[98, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize.FakeQuantizeBase", false]], "fallbacktuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.FallbackTuningSampler", false]], "fashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.FashionMNIST", false]], "fastbiascorrection (class in neural_compressor.algorithm.fast_bias_correction)": [[147, "neural_compressor.algorithm.fast_bias_correction.FastBiasCorrection", false]], "fatal() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.fatal", false]], "fault_tolerant_file() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.fault_tolerant_file", false]], "fetch_module() (in 
module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.fetch_module", false]], "fetch_module() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.fetch_module", false]], "fetch_module() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.fetch_module", false]], "fetcher (class in neural_compressor.data.dataloaders.fetcher)": [[203, "neural_compressor.data.dataloaders.fetcher.Fetcher", false]], "fetchweightfromreshapeoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape)": [[46, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape.FetchWeightFromReshapeOptimizer", false]], "fetchweightfromreshapeoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape)": [[318, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape.FetchWeightFromReshapeOptimizer", false]], "filter (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.Filter", false]], "filter_fn() (in module neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.filter_fn", false]], "filter_registry() (in module neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.filter_registry", false]], "filters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.FILTERS", false]], "finalize_calibration() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.finalize_calibration", false]], "find_by_name() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.find_by_name", false]], "find_layers() (in module neural_compressor.compression.pruner.wanda.utils)": [[194, "neural_compressor.compression.pruner.wanda.utils.find_layers", false]], "find_layers() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.find_layers", false]], "find_layers_name() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.find_layers_name", false]], "find_opset() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.find_opset", false]], "fit() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.fit", false]], "fit() (in module neural_compressor.mix_precision)": [[235, "neural_compressor.mix_precision.fit", false]], "fit() (in module neural_compressor.quantization)": [[262, "neural_compressor.quantization.fit", false]], "fit() (in module neural_compressor.training)": [[449, "neural_compressor.training.fit", false]], "fix_ref_type_of_graph_def() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.fix_ref_type_of_graph_def", false]], "fix_ref_type_of_graph_def() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.fix_ref_type_of_graph_def", false]], "flatten_static_graph 
(neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.flatten_static_graph", false]], "float16activationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.Float16ActivationOperator", false]], "float16binaryoperator (class in neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, "neural_compressor.adaptor.ox_utils.operators.binary_op.Float16BinaryOperator", false]], "float_to_bfloat16() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.float_to_bfloat16", false]], "float_to_float16() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.float_to_float16", false]], "fn (neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.patternpair attribute)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair.fn", false]], "foldbatchnormnodesoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm)": [[47, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm.FoldBatchNormNodesOptimizer", false]], "foldbatchnormnodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm)": [[319, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm.FoldBatchNormNodesOptimizer", false]], "footprint (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Footprint", false]], "format_list2str() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.format_list2str", false]], "forward_wrapper() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.forward_wrapper", false]], "forward_wrapper() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.forward_wrapper", false]], "forward_wrapper() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.forward_wrapper", false]], "fp8_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.fp8_entry", false]], "fp8config (class in 
neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.FP8Config", false]], "framework_datasets (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.framework_datasets", false]], "freezefakequantopoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant)": [[73, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant.FreezeFakeQuantOpOptimizer", false]], "freezefakequantopoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant)": [[345, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant.FreezeFakeQuantOpOptimizer", false]], "freezevaluetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value)": [[74, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value.FreezeValueTransformer", false]], "freezevaluetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value)": [[346, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value.FreezeValueTransformer", false]], "freezevaluewithoutcalibtransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib)": [[75, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib.FreezeValueWithoutCalibTransformer", false]], "frozen_pb_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.frozen_pb_session", false]], "frozen_pb_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.frozen_pb_session", false]], "fulltokenizer (class in neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.FullTokenizer", false]], "function1() (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.function1", false]], "function2() (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.function2", false]], "function3() (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.function3", false]], "fuse() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.fuse", false]], "fusebiasaddandaddoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add)": [[49, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add.FuseBiasAddAndAddOptimizer", false]], "fusebiasaddandaddoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add)": [[321, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add.FuseBiasAddAndAddOptimizer", false]], "fusecolumnwisemuloptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul)": [[50, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul.FuseColumnWiseMulOptimizer", false]], "fusecolumnwisemuloptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul)": [[322, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul.FuseColumnWiseMulOptimizer", false]], 
"fuseconvredundantdequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize)": [[76, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize.FuseConvRedundantDequantizeTransformer", false]], "fuseconvredundantdequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize)": [[347, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize.FuseConvRedundantDequantizeTransformer", false]], "fuseconvrequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize)": [[77, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize.FuseConvRequantizeTransformer", false]], "fuseconvrequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize)": [[348, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize.FuseConvRequantizeTransformer", false]], "fuseconvwithmathoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math)": [[51, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math.FuseConvWithMathOptimizer", false]], "fuseconvwithmathoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math)": [[323, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math.FuseConvWithMathOptimizer", false]], "fusedecomposedbnoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.FuseDecomposedBNOptimizer", false]], "fusedecomposedbnoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.FuseDecomposedBNOptimizer", false]], "fusedecomposedinoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.FuseDecomposedINOptimizer", false]], "fusedecomposedinoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.FuseDecomposedINOptimizer", false]], "fusedmatmuloperator (class in neural_compressor.adaptor.ox_utils.operators.matmul)": [[18, "neural_compressor.adaptor.ox_utils.operators.matmul.FusedMatMulOperator", false]], "fusegeluoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu)": [[54, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu.FuseGeluOptimizer", false]], "fusegeluoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu)": [[326, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu.FuseGeluOptimizer", false]], "fuselayernormoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.FuseLayerNormOptimizer", false]], "fuselayernormoptimizer (class in 
neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.FuseLayerNormOptimizer", false]], "fusematmulredundantdequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize)": [[78, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize.FuseMatMulRedundantDequantizeTransformer", false]], "fusematmulredundantdequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize)": [[349, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize.FuseMatMulRedundantDequantizeTransformer", false]], "fusematmulrequantizedequantizenewapitransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeNewAPITransformer", false]], "fusematmulrequantizedequantizenewapitransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeNewAPITransformer", false]], "fusematmulrequantizedequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeTransformer", false]], "fusematmulrequantizedequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeTransformer", false]], "fusematmulrequantizenewapitransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeNewAPITransformer", false]], "fusematmulrequantizenewapitransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeNewAPITransformer", false]], "fusematmulrequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeTransformer", false]], "fusematmulrequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeTransformer", false]], "fusenodestartwithconcatv2 (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2)": [[109, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2.FuseNodeStartWithConcatV2", false]], "fusenodestartwithconcatv2 (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2)": [[119, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2.FuseNodeStartWithConcatV2", false]], 
"fusenodestartwithconcatv2 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2)": [[364, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2.FuseNodeStartWithConcatV2", false]], "fusenodestartwithconcatv2 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2)": [[374, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2.FuseNodeStartWithConcatV2", false]], "fusenodestartwithconv2d (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv)": [[110, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithconv2d (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv)": [[120, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithconv2d (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv)": [[365, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithconv2d (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv)": [[375, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithdeconv2d (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv)": [[111, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv.FuseNodeStartWithDeconv2d", false]], "fusenodestartwithdeconv2d (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv)": [[366, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv.FuseNodeStartWithDeconv2d", false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn)": [[108, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn.FuseNodeStartWithFusedBatchNormV3", false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn)": [[118, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn.FuseNodeStartWithFusedBatchNormV3", false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn)": [[363, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn.FuseNodeStartWithFusedBatchNormV3", false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn)": [[373, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn.FuseNodeStartWithFusedBatchNormV3", false]], "fusenodestartwithfusedinstancenorm (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in)": [[112, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in.FuseNodeStartWithFusedInstanceNorm", false]], "fusenodestartwithfusedinstancenorm (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in)": [[367, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in.FuseNodeStartWithFusedInstanceNorm", false]], "fusenodestartwithmatmul (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul)": [[113, 
"neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithmatmul (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul)": [[122, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithmatmul (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul)": [[368, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithmatmul (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul)": [[377, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithpooling (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling)": [[114, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling.FuseNodeStartWithPooling", false]], "fusenodestartwithpooling (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling)": [[123, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling.FuseNodeStartWithPooling", false]], "fusenodestartwithpooling (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling)": [[369, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling.FuseNodeStartWithPooling", false]], "fusenodestartwithpooling (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling)": [[378, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling.FuseNodeStartWithPooling", false]], "fusepadwithconv2doptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv)": [[56, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv.FusePadWithConv2DOptimizer", false]], "fusepadwithconv2doptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv)": [[328, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv.FusePadWithConv2DOptimizer", false]], "fusepadwithfp32conv2doptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv)": [[57, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv.FusePadWithFP32Conv2DOptimizer", false]], "fusepadwithfp32conv2doptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv)": [[329, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv.FusePadWithFP32Conv2DOptimizer", false]], "fusetransposereshapeoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose)": [[58, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose.FuseTransposeReshapeOptimizer", false]], "fusetransposereshapeoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose)": [[330, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose.FuseTransposeReshapeOptimizer", false]], "gatheroperator (class in neural_compressor.adaptor.ox_utils.operators.gather)": [[13, "neural_compressor.adaptor.ox_utils.operators.gather.GatherOperator", false]], "gemmoperator 
(class in neural_compressor.adaptor.ox_utils.operators.gemm)": [[15, "neural_compressor.adaptor.ox_utils.operators.gemm.GemmOperator", false]], "gen_bar_updater() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.gen_bar_updater", false]], "generaltopk (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.GeneralTopK", false]], "generate_activation_observer() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.generate_activation_observer", false]], "generate_activation_observer() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.generate_activation_observer", false]], "generate_feed_dict() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.generate_feed_dict", false]], "generate_feed_dict() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.generate_feed_dict", false]], "generate_ffn2_pruning_config() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.generate_ffn2_pruning_config", false]], "generate_mha_pruning_config() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.generate_mha_pruning_config", false]], "generate_prefix() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.generate_prefix", false]], "generate_prefix() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.generate_prefix", false]], "generate_xpu_qconfig() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.generate_xpu_qconfig", false]], "generategraphwithqdqpattern (class in neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern)": [[92, "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern.GenerateGraphWithQDQPattern", false]], "generategraphwithqdqpattern (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern)": [[357, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern.GenerateGraphWithQDQPattern", false]], "generator1() (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.generator1", false]], "get_absorb_layers() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_absorb_layers", false]], "get_absorb_layers() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_absorb_layers", false]], "get_accelerator() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.get_accelerator", false]], "get_activation() (in module neural_compressor.compression.distillation.utility)": [[166, "neural_compressor.compression.distillation.utility.get_activation", false]], "get_adaptor_name() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.get_adaptor_name", false]], "get_algorithm() (in module neural_compressor.utils.create_obj_from_config)": [[455, 
"neural_compressor.utils.create_obj_from_config.get_algorithm", false]], "get_all_config_set() (in module neural_compressor.tensorflow.quantization.autotune)": [[302, "neural_compressor.tensorflow.quantization.autotune.get_all_config_set", false]], "get_all_config_set() (in module neural_compressor.torch.quantization.autotune)": [[438, "neural_compressor.torch.quantization.autotune.get_all_config_set", false]], "get_all_config_set_from_config_registry() (in module neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.get_all_config_set_from_config_registry", false]], "get_all_fp32_data() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.get_all_fp32_data", false]], "get_all_fp32_data() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_all_fp32_data", false]], "get_all_registered_configs() (in module neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.get_all_registered_configs", false]], "get_all_registered_configs() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_all_registered_configs", false]], "get_architecture() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_architecture", false]], "get_attributes() (in module neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.get_attributes", false]], "get_blob_size() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.get_blob_size", false]], "get_block_prefix() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_block_prefix", false]], "get_block_prefix() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_block_prefix", false]], "get_bounded_threads() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_bounded_threads", false]], "get_children() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_children", false]], "get_children() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_children", false]], "get_common_module() (in module neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.get_common_module", false]], "get_const_dim_count() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.get_const_dim_count", false]], "get_const_dim_count() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.get_const_dim_count", false]], "get_const_dim_count() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.get_const_dim_count", false]], "get_const_dim_count() (in module 
neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.get_const_dim_count", false]], "get_core_ids() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_core_ids", false]], "get_criterion() (in module neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.get_criterion", false]], "get_dataloader() (in module neural_compressor.torch.algorithms.weight_only.autoround)": [[418, "neural_compressor.torch.algorithms.weight_only.autoround.get_dataloader", false]], "get_default_autoround_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_AutoRound_config", false]], "get_default_awq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_awq_config", false]], "get_default_double_quant_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_double_quant_config", false]], "get_default_dynamic_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_dynamic_config", false]], "get_default_fp8_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_fp8_config", false]], "get_default_fp8_config_set() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_fp8_config_set", false]], "get_default_gptq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_gptq_config", false]], "get_default_hqq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_hqq_config", false]], "get_default_mixed_precision_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_mixed_precision_config", false]], "get_default_mixed_precision_config_set() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_mixed_precision_config_set", false]], "get_default_mx_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_mx_config", false]], "get_default_rtn_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_rtn_config", false]], "get_default_sq_config() (in module neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.get_default_sq_config", false]], "get_default_sq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_sq_config", false]], "get_default_static_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_static_config", false]], "get_default_static_quant_config() (in module neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.get_default_static_quant_config", false]], 
"get_default_static_quant_config() (in module neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.get_default_static_quant_config", false]], "get_default_teq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_teq_config", false]], "get_depth() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_depth", false]], "get_depth() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_depth", false]], "get_dict_at_depth() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_dict_at_depth", false]], "get_dict_at_depth() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_dict_at_depth", false]], "get_double_quant_config_dict() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_double_quant_config_dict", false]], "get_element_under_depth() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_element_under_depth", false]], "get_element_under_depth() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_element_under_depth", false]], "get_embedding_contiguous() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_embedding_contiguous", false]], "get_estimator_graph() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_estimator_graph", false]], "get_example_input() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_example_input", false]], "get_fallback_order() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_fallback_order", false]], "get_filter_fn() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.get_filter_fn", false]], "get_final_text() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.get_final_text", false]], "get_framework_name() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.get_framework_name", false]], "get_func_from_config() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_func_from_config", false]], "get_graph_def() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_graph_def", false]], "get_graph_def() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_graph_def", false]], "get_half_precision_node_set() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.get_half_precision_node_set", false]], "get_hidden_states() (in module neural_compressor.adaptor.torch_utils.util)": [[145, 
"neural_compressor.adaptor.torch_utils.util.get_hidden_states", false]], "get_index_from_strided_slice_of_shape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_index_from_strided_slice_of_shape", false]], "get_input_output_node_names() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_input_output_node_names", false]], "get_input_output_node_names() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_input_output_node_names", false]], "get_ipex_version() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.get_ipex_version", false]], "get_layers() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.get_layers", false]], "get_linux_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_linux_numa_info", false]], "get_max_supported_opset_version() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema)": [[89, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema.get_max_supported_opset_version", false]], "get_metrics() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_metrics", false]], "get_model_device() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_model_device", false]], "get_model_fwk_name() (in module neural_compressor.model.model)": [[239, "neural_compressor.model.model.get_model_fwk_name", false]], "get_model_info() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_model_info", false]], "get_model_input_shape() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_model_input_shape", false]], "get_model_input_shape() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_model_input_shape", false]], "get_model_type() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.get_model_type", false]], "get_model_type() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.get_model_type", false]], "get_module() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_module", false]], "get_module() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_module", false]], "get_module() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.get_module", false]], "get_module() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_module", false]], "get_module_input_output() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_module_input_output", false]], "get_module_input_output() (in module neural_compressor.torch.algorithms.weight_only.utility)": 
[[433, "neural_compressor.torch.algorithms.weight_only.utility.get_module_input_output", false]], "get_mse_order_per_fp32() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_mse_order_per_fp32", false]], "get_mse_order_per_int8() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_mse_order_per_int8", false]], "get_named_children() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_named_children", false]], "get_named_children() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_named_children", false]], "get_node_mapping() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.get_node_mapping", false]], "get_node_original_name() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.get_node_original_name", false]], "get_numa_node() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_numa_node", false]], "get_number_of_sockets() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_number_of_sockets", false]], "get_op_list() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_op_list", false]], "get_op_type_by_name() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_op_type_by_name", false]], "get_parent() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.get_parent", false]], "get_parent() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_parent", false]], "get_pattern() (in module neural_compressor.compression.pruner.patterns)": [[176, "neural_compressor.compression.pruner.patterns.get_pattern", false]], "get_physical_ids() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_physical_ids", false]], "get_postprocess() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_postprocess", false]], "get_preprocess() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_preprocess", false]], "get_processor_type_from_user_config() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_processor_type_from_user_config", false]], "get_pruner() (in module neural_compressor.compression.pruner.pruners)": [[183, "neural_compressor.compression.pruner.pruners.get_pruner", false]], "get_quant_dequant_output() (in module neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, "neural_compressor.adaptor.ox_utils.smooth_quant.get_quant_dequant_output", false]], "get_quantizable_onnx_ops() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.get_quantizable_onnx_ops", false]], "get_quantizable_ops_from_cfgs() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_quantizable_ops_from_cfgs", false]], "get_quantizable_ops_from_cfgs() (in module 
neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_quantizable_ops_from_cfgs", false]], "get_quantizable_ops_recursively() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.get_quantizable_ops_recursively", false]], "get_quantizable_ops_recursively() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_quantizable_ops_recursively", false]], "get_quantizer() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_quantizer", false]], "get_reg() (in module neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.get_reg", false]], "get_reg_type() (in module neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.get_reg_type", false]], "get_reversed_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_reversed_numa_info", false]], "get_rtn_double_quant_config_set() (in module neural_compressor.torch.quantization.autotune)": [[438, "neural_compressor.torch.quantization.autotune.get_rtn_double_quant_config_set", false]], "get_scheduler() (in module neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.get_scheduler", false]], "get_schema() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema)": [[89, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema.get_schema", false]], "get_size() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_size", false]], "get_sparsity_ratio() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.get_sparsity_ratio", false]], "get_sparsity_ratio_tf() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.get_sparsity_ratio_tf", false]], "get_subgraphs_from_onnx() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_subgraphs_from_onnx", false]], "get_super_module_by_name() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_super_module_by_name", false]], "get_super_module_by_name() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_super_module_by_name", false]], "get_tensor_by_name() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_tensor_by_name", false]], "get_tensor_by_name() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_tensor_by_name", false]], "get_tensor_histogram() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.get_tensor_histogram", false]], "get_tensor_histogram() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_tensor_histogram", false]], "get_tensor_val_from_graph_node() (in module neural_compressor.adaptor.tf_utils.util)": [[133, 
"neural_compressor.adaptor.tf_utils.util.get_tensor_val_from_graph_node", false]], "get_tensorflow_node_attr() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_node_attr", false]], "get_tensorflow_node_shape_attr() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_node_shape_attr", false]], "get_tensorflow_tensor_data() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_tensor_data", false]], "get_tensorflow_tensor_shape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_tensor_shape", false]], "get_tensors_info() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_tensors_info", false]], "get_tf_criterion() (in module neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.get_tf_criterion", false]], "get_tf_model_type() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.get_tf_model_type", false]], "get_threads() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_threads", false]], "get_threads_per_core() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_threads_per_core", false]], "get_torch_version() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_torch_version", false]], "get_torch_version() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.get_torch_version", false]], "get_torchvision_map() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.get_torchvision_map", false]], "get_tuning_history() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_tuning_history", false]], "get_unquantized_node_set() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.get_unquantized_node_set", false]], "get_weight_from_input_tensor() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_weight_from_input_tensor", false]], "get_weight_scale() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.get_weight_scale", false]], "get_weights_details() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_weights_details", false]], "get_windows_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_windows_numa_info", false]], "get_woq_tuning_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_woq_tuning_config", false]], "get_workspace() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.get_workspace", false]], "global_state (class in neural_compressor.utils.utility)": 
[[466, "neural_compressor.utils.utility.GLOBAL_STATE", false]], "global_step (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.global_step", false]], "global_step (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.global_step", false]], "global_step (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.global_step", false]], "globalaveragepooloperator (class in neural_compressor.adaptor.ox_utils.operators.gavgpool)": [[14, "neural_compressor.adaptor.ox_utils.operators.gavgpool.GlobalAveragePoolOperator", false]], "gptq (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.GPTQ", false]], "gptq() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.gptq", false]], "gptq_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.gptq_entry", false]], "gptq_quantize() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.gptq_quantize", false]], "gptqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.GPTQConfig", false]], "gptqconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.GPTQConfig", false]], "gptquantizer (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.GPTQuantizer", false]], "gradientcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.GradientCriterion", false]], "graph_def_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.graph_def_session", false]], "graph_def_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.graph_def_session", false]], "graph_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.graph_session", false]], "graph_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.graph_session", false]], "graphanalyzer (class in neural_compressor.adaptor.tf_utils.graph_util)": [[95, "neural_compressor.adaptor.tf_utils.graph_util.GraphAnalyzer", false]], "graphanalyzer (class in neural_compressor.tensorflow.quantization.utils.graph_util)": [[360, "neural_compressor.tensorflow.quantization.utils.graph_util.GraphAnalyzer", false]], "graphconverter (class in neural_compressor.adaptor.tf_utils.graph_converter)": [[33, "neural_compressor.adaptor.tf_utils.graph_converter.GraphConverter", false]], "graphconverter (class in neural_compressor.tensorflow.quantization.utils.graph_converter)": [[306, "neural_compressor.tensorflow.quantization.utils.graph_converter.GraphConverter", false]], "graphconverterwithoutcalib (class in neural_compressor.adaptor.tf_utils.graph_converter_without_calib)": [[34, 
"neural_compressor.adaptor.tf_utils.graph_converter_without_calib.GraphConverterWithoutCalib", false]], "graphcseoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer)": [[59, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer.GraphCseOptimizer", false]], "graphcseoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer)": [[331, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer.GraphCseOptimizer", false]], "graphfoldconstantoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant)": [[48, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant.GraphFoldConstantOptimizer", false]], "graphfoldconstantoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant)": [[320, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant.GraphFoldConstantOptimizer", false]], "graphrewriterbase (class in neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base)": [[71, "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base.GraphRewriterBase", false]], "graphrewriterbase (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base)": [[343, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base.GraphRewriterBase", false]], "graphrewriterhelper (class in neural_compressor.adaptor.tf_utils.graph_util)": [[95, "neural_compressor.adaptor.tf_utils.graph_util.GraphRewriterHelper", false]], "graphrewriterhelper (class in neural_compressor.tensorflow.quantization.utils.graph_util)": [[360, "neural_compressor.tensorflow.quantization.utils.graph_util.GraphRewriterHelper", false]], "graphtrace (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.GraphTrace", false]], "graphtrace (class in neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.GraphTrace", false]], "graphtransformbase (class in neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base)": [[129, "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base.GraphTransformBase", false]], "graphtransformbase (class in neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base)": [[381, "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base.GraphTransformBase", false]], "grappleroptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass)": [[60, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass.GrapplerOptimizer", false]], "grappleroptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass)": [[332, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass.GrapplerOptimizer", false]], "group_size (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.group_size", false]], "grouplasso (class in neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.GroupLasso", false]], "halfprecisionconverter (class in neural_compressor.torch.algorithms.mixed_precision.half_precision_convert)": [[399, 
"neural_compressor.torch.algorithms.mixed_precision.half_precision_convert.HalfPrecisionConverter", false]], "halfprecisionmodulewrapper (class in neural_compressor.torch.algorithms.mixed_precision.module_wrappers)": [[401, "neural_compressor.torch.algorithms.mixed_precision.module_wrappers.HalfPrecisionModuleWrapper", false]], "hawq_top() (in module neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.hawq_top", false]], "hawq_v2tunestrategy (class in neural_compressor.strategy.hawq_v2)": [[269, "neural_compressor.strategy.hawq_v2.HAWQ_V2TuneStrategy", false]], "head_masks (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.head_masks", false]], "hessiantrace (class in neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.HessianTrace", false]], "histogramcollector (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.HistogramCollector", false]], "hpoconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.HPOConfig", false]], "hpu_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.HPU_Accelerator", false]], "hpuweightonlylinear (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.HPUWeightOnlyLinear", false]], "hqq_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.hqq_entry", false]], "hqqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.HQQConfig", false]], "hqqlinear (class in neural_compressor.torch.algorithms.weight_only.hqq.core)": [[423, "neural_compressor.torch.algorithms.weight_only.hqq.core.HQQLinear", false]], "hqqmoduleconfig (class in neural_compressor.torch.algorithms.weight_only.hqq.config)": [[422, "neural_compressor.torch.algorithms.weight_only.hqq.config.HQQModuleConfig", false]], "hqqtensorhandle (class in neural_compressor.torch.algorithms.weight_only.hqq.core)": [[423, "neural_compressor.torch.algorithms.weight_only.hqq.core.HQQTensorHandle", false]], "hqquantizer (class in neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.HQQuantizer", false]], "imagefolder (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.ImageFolder", false]], "imagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.ImagenetRaw", false]], "incquantizationconfigmixin (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.INCQuantizationConfigMixin", false]], "incweightonlylinear (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.INCWeightOnlyLinear", false]], "indexfetcher (class in neural_compressor.data.dataloaders.fetcher)": [[203, "neural_compressor.data.dataloaders.fetcher.IndexFetcher", false]], "indexfetcher (class in neural_compressor.tensorflow.utils.data)": [[387, 
"neural_compressor.tensorflow.utils.data.IndexFetcher", false]], "infer_onnx_shape_dtype() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.infer_onnx_shape_dtype", false]], "infer_shapes() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.infer_shapes", false]], "info() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.info", false]], "init_quantize_config() (in module neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper)": [[101, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper.init_quantize_config", false]], "init_tuning() (in module neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.init_tuning", false]], "initial_tuning_cfg_with_quant_mode() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.initial_tuning_cfg_with_quant_mode", false]], "initialize_int8_avgpool() (in module neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.initialize_int8_avgpool", false]], "initialize_int8_conv2d() (in module neural_compressor.tensorflow.keras.layers.conv2d)": [[292, "neural_compressor.tensorflow.keras.layers.conv2d.initialize_int8_conv2d", false]], "initialize_int8_dense() (in module neural_compressor.tensorflow.keras.layers.dense)": [[293, "neural_compressor.tensorflow.keras.layers.dense.initialize_int8_dense", false]], "initialize_int8_depthwise_conv2d() (in module neural_compressor.tensorflow.keras.layers.depthwise_conv2d)": [[294, "neural_compressor.tensorflow.keras.layers.depthwise_conv2d.initialize_int8_depthwise_conv2d", false]], "initialize_int8_maxpool() (in module neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.initialize_int8_maxpool", false]], "initialize_int8_separable_conv2d() (in module neural_compressor.tensorflow.keras.layers.separable_conv2d)": [[298, "neural_compressor.tensorflow.keras.layers.separable_conv2d.initialize_int8_separable_conv2d", false]], "initialize_name_counter() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.initialize_name_counter", false]], "injectdummybiasaddoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd)": [[44, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd.InjectDummyBiasAddOptimizer", false]], "injectdummybiasaddoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd)": [[316, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd.InjectDummyBiasAddOptimizer", false]], "input2tuple() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.input2tuple", false]], "inputfeatures (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.InputFeatures", false]], "inputfeatures (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.InputFeatures", false]], "insertlogging (class in neural_compressor.adaptor.tf_utils.transform_graph.insert_logging)": [[131, 
"neural_compressor.adaptor.tf_utils.transform_graph.insert_logging.InsertLogging", false]], "insertlogging (class in neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging)": [[383, "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging.InsertLogging", false]], "insertprintminmaxnode (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node)": [[62, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node.InsertPrintMinMaxNode", false]], "insertprintminmaxnode (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node)": [[334, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node.InsertPrintMinMaxNode", false]], "int8_node_name_reverse() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.int8_node_name_reverse", false]], "intermediatelayersknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.IntermediateLayersKnowledgeDistillationLoss", false]], "intermediatelayersknowledgedistillationlossconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.IntermediateLayersKnowledgeDistillationLossConfig", false]], "invalid_layers (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.invalid_layers", false]], "invalid_layers (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.invalid_layers", false]], "invalid_layers (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.invalid_layers", false]], "ipexmodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.IPEXModel", false]], "is_b_transposed() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.is_B_transposed", false]], "is_ckpt_format() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.is_ckpt_format", false]], "is_ckpt_format() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.is_ckpt_format", false]], "is_fused_module() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.is_fused_module", false]], "is_global (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.is_global", false]], "is_global (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.is_global", false]], "is_global (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.is_global", false]], "is_hpex_available() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_hpex_available", false]], "is_int8_model() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.is_int8_model", false]], 
"is_ipex_available() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_ipex_available", false]], "is_ipex_imported() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_ipex_imported", false]], "is_leaf() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.is_leaf", false]], "is_list_or_tuple() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.is_list_or_tuple", false]], "is_model_quantized() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.is_model_quantized", false]], "is_onnx_domain() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.is_onnx_domain", false]], "is_package_available() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_package_available", false]], "is_saved_model_format() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.is_saved_model_format", false]], "is_saved_model_format() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.is_saved_model_format", false]], "is_transformers_imported() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_transformers_imported", false]], "isiterable() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.isiterable", false]], "iterabledataset (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.IterableDataset", false]], "iterablefetcher (class in neural_compressor.data.dataloaders.fetcher)": [[203, "neural_compressor.data.dataloaders.fetcher.IterableFetcher", false]], "iterablefetcher (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.IterableFetcher", false]], "iterablesampler (class in neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.IterableSampler", false]], "iterablesampler (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.IterableSampler", false]], "iterativescheduler (class in neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.IterativeScheduler", false]], "iterator_sess_run() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.iterator_sess_run", false]], "iterator_sess_run() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.iterator_sess_run", false]], "itex_installed() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.itex_installed", false]], "jitbasicsearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher", false]], "k (neural_compressor.metric.metric.generaltopk attribute)": [[234, "neural_compressor.metric.metric.GeneralTopK.k", false]], "k 
(neural_compressor.metric.metric.tensorflowtopk attribute)": [[234, "neural_compressor.metric.metric.TensorflowTopK.k", false]], "keep_mask_layers (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.keep_mask_layers", false]], "keep_mask_layers (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.keep_mask_layers", false]], "keep_mask_layers (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.keep_mask_layers", false]], "keras (class in neural_compressor.config)": [[195, "neural_compressor.config.Keras", false]], "keras_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.keras_session", false]], "keras_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.keras_session", false]], "kerasadaptor (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasAdaptor", false]], "kerasbasepattern (class in neural_compressor.compression.pruner.patterns.base)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern", false]], "kerasbasepruner (class in neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner", false]], "kerasbasicpruner (class in neural_compressor.compression.pruner.pruners.basic)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner", false]], "kerasconfigconverter (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasConfigConverter", false]], "kerasmodel (class in neural_compressor.model.keras_model)": [[238, "neural_compressor.model.keras_model.KerasModel", false]], "kerasmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.KerasModel", false]], "keraspatternnxm (class in neural_compressor.compression.pruner.patterns.nxm)": [[179, "neural_compressor.compression.pruner.patterns.nxm.KerasPatternNxM", false]], "kerasquery (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasQuery", false]], "kerassurgery (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasSurgery", false]], "kl_divergence (class in neural_compressor.utils.kl_divergence)": [[461, "neural_compressor.utils.kl_divergence.KL_Divergence", false]], "klcalibrator (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.KLCalibrator", false]], "knowledgedistillationframework (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.KnowledgeDistillationFramework", false]], "knowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.KnowledgeDistillationLoss", false]], "knowledgedistillationlossconfig (class in 
neural_compressor.config)": [[195, "neural_compressor.config.KnowledgeDistillationLossConfig", false]], "label_list (neural_compressor.metric.metric.accuracy attribute)": [[234, "neural_compressor.metric.metric.Accuracy.label_list", false]], "label_list (neural_compressor.metric.metric.mae attribute)": [[234, "neural_compressor.metric.metric.MAE.label_list", false]], "label_list (neural_compressor.metric.metric.mse attribute)": [[234, "neural_compressor.metric.metric.MSE.label_list", false]], "labelbalancecocorawfilter (class in neural_compressor.data.filters.coco_filter)": [[217, "neural_compressor.data.filters.coco_filter.LabelBalanceCOCORawFilter", false]], "labelbalancecocorecordfilter (class in neural_compressor.data.filters.coco_filter)": [[217, "neural_compressor.data.filters.coco_filter.LabelBalanceCOCORecordFilter", false]], "labels (neural_compressor.metric.bleu.bleu attribute)": [[227, "neural_compressor.metric.bleu.BLEU.labels", false]], "labelshift (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.LabelShift", false]], "layer_1 (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompression attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression.layer_1", false]], "layer_2 (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompression attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression.layer_2", false]], "layerhistogramcollector (class in neural_compressor.utils.collect_layer_histogram)": [[453, "neural_compressor.utils.collect_layer_histogram.LayerHistogramCollector", false]], "layerwisequant (class in neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize)": [[139, "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize.LayerWiseQuant", false]], "lazyimport (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.LazyImport", false]], "lazyimport (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.LazyImport", false]], "linear2linearsearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher", false]], "linear_layers (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.linear_layers", false]], "linear_patterns (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompressioniterator attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompressionIterator.linear_patterns", false]], "linearcompression (class in neural_compressor.compression.pruner.model_slim.weight_slim)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression", false]], "linearcompressioniterator (class in neural_compressor.compression.pruner.model_slim.weight_slim)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompressionIterator", false]], "load() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load)": [[140, "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load.load", false]], "load() (in module neural_compressor.torch.algorithms.layer_wise.load)": [[396, "neural_compressor.torch.algorithms.layer_wise.load.load", false]], "load() 
(in module neural_compressor.torch.algorithms.pt2e_quant.save_load)": [[408, "neural_compressor.torch.algorithms.pt2e_quant.save_load.load", false]], "load() (in module neural_compressor.torch.algorithms.static_quant.save_load)": [[415, "neural_compressor.torch.algorithms.static_quant.save_load.load", false]], "load() (in module neural_compressor.torch.algorithms.weight_only.save_load)": [[431, "neural_compressor.torch.algorithms.weight_only.save_load.load", false]], "load() (in module neural_compressor.torch.quantization.load_entry)": [[441, "neural_compressor.torch.quantization.load_entry.load", false]], "load() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.load", false]], "load_and_cache_examples() (in module neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.load_and_cache_examples", false]], "load_config_mapping() (in module neural_compressor.common.utils.save_load)": [[160, "neural_compressor.common.utils.save_load.load_config_mapping", false]], "load_data_from_pkl() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.load_data_from_pkl", false]], "load_empty_model() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_empty_model", false]], "load_empty_model() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_empty_model", false]], "load_empty_model() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.load_empty_model", false]], "load_layer_wise_quantized_model() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_layer_wise_quantized_model", false]], "load_layer_wise_quantized_model() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_layer_wise_quantized_model", false]], "load_module() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_module", false]], "load_saved_model() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.load_saved_model", false]], "load_saved_model() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.load_saved_model", false]], "load_tensor() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_tensor", false]], "load_tensor() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_tensor", false]], "load_tensor_from_shard() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_tensor_from_shard", false]], "load_tensor_from_shard() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_tensor_from_shard", false]], "load_value() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_value", false]], "load_vocab() (in module 
neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.load_vocab", false]], "load_weight_only() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.load_weight_only", false]], "loadformat (class in neural_compressor.torch.utils.constants)": [[445, "neural_compressor.torch.utils.constants.LoadFormat", false]], "log() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.log", false]], "log_process() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.log_process", false]], "log_quantizable_layers_per_transformer() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.log_quantizable_layers_per_transformer", false]], "logger (class in neural_compressor.common.utils.logger)": [[159, "neural_compressor.common.utils.logger.Logger", false]], "logger (class in neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.Logger", false]], "loss (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.Loss", false]], "lowerbitssampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.LowerBitsSampler", false]], "lstmoperator (class in neural_compressor.adaptor.ox_utils.operators.lstm)": [[17, "neural_compressor.adaptor.ox_utils.operators.lstm.LSTMOperator", false]], "m (neural_compressor.compression.pruner.patterns.mha.patternmha attribute)": [[177, "neural_compressor.compression.pruner.patterns.mha.PatternMHA.M", false]], "m (neural_compressor.compression.pruner.patterns.ninm.pytorchpatternninm attribute)": [[178, "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM.M", false]], "mae (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.MAE", false]], "magnitudecriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.MagnitudeCriterion", false]], "magnitudecriterion (class in neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.MagnitudeCriterion", false]], "make_dquant_node() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.make_dquant_node", false]], "make_matmul_weight_only_node() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.make_matmul_weight_only_node", false]], "make_module() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.make_module", false]], "make_nc_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.make_nc_model", false]], "make_onnx_inputs_outputs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.make_onnx_inputs_outputs", false]], "make_onnx_shape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.make_onnx_shape", false]], "make_quant_node() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.make_quant_node", false]], "make_sub_graph() (in module 
neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, "neural_compressor.adaptor.ox_utils.smooth_quant.make_sub_graph", false]], "make_symbol_block() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.make_symbol_block", false]], "map_numpy_to_onnx_dtype() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.map_numpy_to_onnx_dtype", false]], "map_onnx_to_numpy_type() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.map_onnx_to_numpy_type", false]], "map_tensorflow_dtype() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.map_tensorflow_dtype", false]], "masks (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.masks", false]], "masks (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.masks", false]], "masks (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.masks", false]], "match_datatype_pattern() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.match_datatype_pattern", false]], "matmuloperator (class in neural_compressor.adaptor.ox_utils.operators.matmul)": [[18, "neural_compressor.adaptor.ox_utils.operators.matmul.MatMulOperator", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.max_sparsity_ratio_per_op", false]], "maxpooloperator (class in neural_compressor.adaptor.ox_utils.operators.maxpool)": [[19, "neural_compressor.adaptor.ox_utils.operators.maxpool.MaxPoolOperator", false]], "mergeduplicatedqdqoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq)": [[93, "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq.MergeDuplicatedQDQOptimizer", false]], "mergeduplicatedqdqoptimizer 
(class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq)": [[358, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq.MergeDuplicatedQDQOptimizer", false]], "metainfochangingmemopoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer)": [[81, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer.MetaInfoChangingMemOpOptimizer", false]], "metainfochangingmemopoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer)": [[352, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer.MetaInfoChangingMemOpOptimizer", false]], "metric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.Metric", false]], "metric_max_over_ground_truths() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.metric_max_over_ground_truths", false]], "metric_max_over_ground_truths() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.metric_max_over_ground_truths", false]], "metric_registry() (in module neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.metric_registry", false]], "metrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.METRICS", false]], "metrics (neural_compressor.metric.metric.metrics attribute)": [[234, "neural_compressor.metric.metric.METRICS.metrics", false]], "metrics (neural_compressor.metric.metric.mxnetmetrics attribute)": [[234, "neural_compressor.metric.metric.MXNetMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.onnxrtitmetrics attribute)": [[234, "neural_compressor.metric.metric.ONNXRTITMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.onnxrtqlmetrics attribute)": [[234, "neural_compressor.metric.metric.ONNXRTQLMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.pytorchmetrics attribute)": [[234, "neural_compressor.metric.metric.PyTorchMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.tensorflowmetrics attribute)": [[234, "neural_compressor.metric.metric.TensorflowMetrics.metrics", false]], "mha_compressions (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.mha_compressions", false]], "mha_scores (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.mha_scores", false]], "min_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.min_sparsity_ratio_per_op", false]], "min_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.min_sparsity_ratio_per_op", false]], "min_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.min_sparsity_ratio_per_op", false]], "minmaxcalibrator (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.MinMaxCalibrator", false]], "miou 
(class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.mIOU", false]], "mixed_precision_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.mixed_precision_entry", false]], "mixedprecisionconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.MixedPrecisionConfig", false]], "mixedprecisionconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.MixedPrecisionConfig", false]], "mnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MNIST", false]], "mode (class in neural_compressor.common.utils.constants)": [[157, "neural_compressor.common.utils.constants.Mode", false]], "mode (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.MODE", false]], "model (class in neural_compressor.model.model)": [[239, "neural_compressor.model.model.Model", false]], "model (class in neural_compressor.tensorflow.utils.model)": [[389, "neural_compressor.tensorflow.utils.model.Model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.model", false]], "model (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.model", false]], "model (neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.model", false]], "model (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.model", false]], "model_forward() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.model_forward", false]], "model_forward() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.model_forward", false]], "model_forward_per_sample() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.model_forward_per_sample", false]], "model_level 
(neural_compressor.common.tuning_param.paramlevel attribute)": [[156, "neural_compressor.common.tuning_param.ParamLevel.MODEL_LEVEL", false]], "model_slim() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.model_slim", false]], "model_slim_ffn2() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.model_slim_ffn2", false]], "model_slim_mha() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.model_slim_mha", false]], "modelsize (class in neural_compressor.objective)": [[245, "neural_compressor.objective.ModelSize", false]], "modelwisetuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.ModelWiseTuningSampler", false]], "module": [[0, "module-neural_compressor.adaptor.mxnet_utils", false], [1, "module-neural_compressor.adaptor.mxnet_utils.util", false], [2, "module-neural_compressor.adaptor.ox_utils.calibration", false], [3, "module-neural_compressor.adaptor.ox_utils.calibrator", false], [4, "module-neural_compressor.adaptor.ox_utils", false], [5, "module-neural_compressor.adaptor.ox_utils.operators.activation", false], [6, "module-neural_compressor.adaptor.ox_utils.operators.argmax", false], [7, "module-neural_compressor.adaptor.ox_utils.operators.attention", false], [8, "module-neural_compressor.adaptor.ox_utils.operators.binary_op", false], [9, "module-neural_compressor.adaptor.ox_utils.operators.concat", false], [10, "module-neural_compressor.adaptor.ox_utils.operators.conv", false], [11, "module-neural_compressor.adaptor.ox_utils.operators.direct_q8", false], [12, "module-neural_compressor.adaptor.ox_utils.operators.embed_layernorm", false], [13, "module-neural_compressor.adaptor.ox_utils.operators.gather", false], [14, "module-neural_compressor.adaptor.ox_utils.operators.gavgpool", false], [15, "module-neural_compressor.adaptor.ox_utils.operators.gemm", false], [16, "module-neural_compressor.adaptor.ox_utils.operators", false], [17, "module-neural_compressor.adaptor.ox_utils.operators.lstm", false], [18, "module-neural_compressor.adaptor.ox_utils.operators.matmul", false], [19, "module-neural_compressor.adaptor.ox_utils.operators.maxpool", false], [20, "module-neural_compressor.adaptor.ox_utils.operators.norm", false], [21, "module-neural_compressor.adaptor.ox_utils.operators.ops", false], [22, "module-neural_compressor.adaptor.ox_utils.operators.pad", false], [23, "module-neural_compressor.adaptor.ox_utils.operators.pooling", false], [24, "module-neural_compressor.adaptor.ox_utils.operators.reduce", false], [25, "module-neural_compressor.adaptor.ox_utils.operators.resize", false], [26, "module-neural_compressor.adaptor.ox_utils.operators.split", false], [27, "module-neural_compressor.adaptor.ox_utils.operators.unary_op", false], [28, "module-neural_compressor.adaptor.ox_utils.quantizer", false], [29, "module-neural_compressor.adaptor.ox_utils.smooth_quant", false], [30, "module-neural_compressor.adaptor.ox_utils.util", false], [31, "module-neural_compressor.adaptor.ox_utils.weight_only", false], [32, "module-neural_compressor.adaptor.tensorflow", false], [33, "module-neural_compressor.adaptor.tf_utils.graph_converter", false], [34, "module-neural_compressor.adaptor.tf_utils.graph_converter_without_calib", false], [35, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert", false], [36, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer", false], [37, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16", false], [38, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd", false], [39, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout", false], [40, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu", false], [41, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random", false], [42, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const", false], [43, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction", false], [44, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd", false], [45, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer", false], [46, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape", false], [47, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm", false], [48, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant", false], [49, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add", false], [50, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul", false], [51, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math", false], [52, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn", false], [53, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in", false], [54, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu", false], [55, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm", false], [56, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv", false], [57, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false], [58, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose", false], [59, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer", false], [60, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass", false], [61, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic", false], [62, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node", false], [63, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu", false], [64, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize", false], [65, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes", false], [66, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm", false], [67, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input", false], [68, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes", false], [69, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes", false], [70, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer", false], 
[71, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base", false], [72, "module-neural_compressor.adaptor.tf_utils.graph_rewriter", false], [73, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant", false], [74, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value", false], [75, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib", false], [76, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false], [77, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize", false], [78, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false], [79, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize", false], [80, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8", false], [81, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer", false], [82, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter", false], [83, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse", false], [84, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert", false], [85, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation", false], [86, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx", false], [87, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph", false], [88, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node", false], [89, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema", false], [90, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils", false], [91, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq", false], [92, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern", false], [93, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq", false], [94, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern", false], [95, "module-neural_compressor.adaptor.tf_utils.graph_util", false], [96, "module-neural_compressor.adaptor.tf_utils", false], [97, "module-neural_compressor.adaptor.tf_utils.quantize_graph", false], [98, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize", false], [99, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat", false], [100, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config", false], [101, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper", false], [102, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers", false], [103, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer", false], [104, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add", false], [105, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base", false], [106, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn", false], [107, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper", false], [108, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn", false], [109, 
"module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2", false], [110, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv", false], [111, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv", false], [112, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in", false], [113, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul", false], [114, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling", false], [115, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq", false], [116, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq", false], [117, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base", false], [118, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn", false], [119, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2", false], [120, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv", false], [121, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu", false], [122, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul", false], [123, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling", false], [124, "module-neural_compressor.adaptor.tf_utils.quantize_graph_common", false], [125, "module-neural_compressor.adaptor.tf_utils.smooth_quant_calibration", false], [126, "module-neural_compressor.adaptor.tf_utils.smooth_quant_scaler", false], [127, "module-neural_compressor.adaptor.tf_utils.tf2onnx_converter", false], [128, "module-neural_compressor.adaptor.tf_utils.transform_graph.bias_correction", false], [129, "module-neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base", false], [130, "module-neural_compressor.adaptor.tf_utils.transform_graph", false], [131, "module-neural_compressor.adaptor.tf_utils.transform_graph.insert_logging", false], [132, "module-neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat", false], [133, "module-neural_compressor.adaptor.tf_utils.util", false], [134, "module-neural_compressor.adaptor.torch_utils.bf16_convert", false], [135, "module-neural_compressor.adaptor.torch_utils.hawq_metric", false], [136, "module-neural_compressor.adaptor.torch_utils", false], [137, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant", false], [138, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle", false], [139, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize", false], [140, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load", false], [141, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.utils", false], [142, "module-neural_compressor.adaptor.torch_utils.model_wrapper", false], [143, "module-neural_compressor.adaptor.torch_utils.pattern_detector", false], [144, "module-neural_compressor.adaptor.torch_utils.symbolic_trace", false], [145, "module-neural_compressor.adaptor.torch_utils.util", false], [146, "module-neural_compressor.algorithm.algorithm", false], [147, "module-neural_compressor.algorithm.fast_bias_correction", false], [148, "module-neural_compressor.algorithm", false], [149, "module-neural_compressor.algorithm.smooth_quant", false], [150, "module-neural_compressor.algorithm.weight_correction", false], [151, "module-neural_compressor.benchmark", 
false], [152, "module-neural_compressor.common.base_config", false], [153, "module-neural_compressor.common.base_tuning", false], [154, "module-neural_compressor.common.benchmark", false], [155, "module-neural_compressor.common", false], [156, "module-neural_compressor.common.tuning_param", false], [157, "module-neural_compressor.common.utils.constants", false], [158, "module-neural_compressor.common.utils", false], [159, "module-neural_compressor.common.utils.logger", false], [160, "module-neural_compressor.common.utils.save_load", false], [161, "module-neural_compressor.common.utils.utility", false], [162, "module-neural_compressor.compression.callbacks", false], [163, "module-neural_compressor.compression.distillation.criterions", false], [164, "module-neural_compressor.compression.distillation", false], [165, "module-neural_compressor.compression.distillation.optimizers", false], [166, "module-neural_compressor.compression.distillation.utility", false], [167, "module-neural_compressor.compression.hpo", false], [168, "module-neural_compressor.compression.hpo.sa_optimizer", false], [169, "module-neural_compressor.compression.pruner.criteria", false], [170, "module-neural_compressor.compression.pruner", false], [171, "module-neural_compressor.compression.pruner.model_slim.auto_slim", false], [172, "module-neural_compressor.compression.pruner.model_slim", false], [173, "module-neural_compressor.compression.pruner.model_slim.pattern_analyzer", false], [174, "module-neural_compressor.compression.pruner.model_slim.weight_slim", false], [175, "module-neural_compressor.compression.pruner.patterns.base", false], [176, "module-neural_compressor.compression.pruner.patterns", false], [177, "module-neural_compressor.compression.pruner.patterns.mha", false], [178, "module-neural_compressor.compression.pruner.patterns.ninm", false], [179, "module-neural_compressor.compression.pruner.patterns.nxm", false], [180, "module-neural_compressor.compression.pruner.pruners.base", false], [181, "module-neural_compressor.compression.pruner.pruners.basic", false], [182, "module-neural_compressor.compression.pruner.pruners.block_mask", false], [183, "module-neural_compressor.compression.pruner.pruners", false], [184, "module-neural_compressor.compression.pruner.pruners.mha", false], [185, "module-neural_compressor.compression.pruner.pruners.pattern_lock", false], [186, "module-neural_compressor.compression.pruner.pruners.progressive", false], [187, "module-neural_compressor.compression.pruner.pruners.retrain_free", false], [188, "module-neural_compressor.compression.pruner.pruning", false], [189, "module-neural_compressor.compression.pruner.regs", false], [190, "module-neural_compressor.compression.pruner.schedulers", false], [191, "module-neural_compressor.compression.pruner.tf_criteria", false], [192, "module-neural_compressor.compression.pruner.utils", false], [193, "module-neural_compressor.compression.pruner.wanda", false], [194, "module-neural_compressor.compression.pruner.wanda.utils", false], [195, "module-neural_compressor.config", false], [196, "module-neural_compressor.contrib", false], [197, "module-neural_compressor.contrib.strategy", false], [198, "module-neural_compressor.contrib.strategy.sigopt", false], [199, "module-neural_compressor.contrib.strategy.tpe", false], [200, "module-neural_compressor.data.dataloaders.base_dataloader", false], [201, "module-neural_compressor.data.dataloaders.dataloader", false], [202, "module-neural_compressor.data.dataloaders.default_dataloader", false], [203, 
"module-neural_compressor.data.dataloaders.fetcher", false], [204, "module-neural_compressor.data.dataloaders.mxnet_dataloader", false], [205, "module-neural_compressor.data.dataloaders.onnxrt_dataloader", false], [206, "module-neural_compressor.data.dataloaders.pytorch_dataloader", false], [207, "module-neural_compressor.data.dataloaders.sampler", false], [208, "module-neural_compressor.data.dataloaders.tensorflow_dataloader", false], [209, "module-neural_compressor.data.datasets.bert_dataset", false], [210, "module-neural_compressor.data.datasets.coco_dataset", false], [211, "module-neural_compressor.data.datasets.dataset", false], [212, "module-neural_compressor.data.datasets.dummy_dataset", false], [213, "module-neural_compressor.data.datasets.dummy_dataset_v2", false], [214, "module-neural_compressor.data.datasets.imagenet_dataset", false], [215, "module-neural_compressor.data.datasets", false], [216, "module-neural_compressor.data.datasets.style_transfer_dataset", false], [217, "module-neural_compressor.data.filters.coco_filter", false], [218, "module-neural_compressor.data.filters.filter", false], [219, "module-neural_compressor.data.filters", false], [220, "module-neural_compressor.data", false], [221, "module-neural_compressor.data.transforms.imagenet_transform", false], [222, "module-neural_compressor.data.transforms", false], [223, "module-neural_compressor.data.transforms.postprocess", false], [224, "module-neural_compressor.data.transforms.tokenization", false], [225, "module-neural_compressor.data.transforms.transform", false], [226, "module-neural_compressor", false], [227, "module-neural_compressor.metric.bleu", false], [228, "module-neural_compressor.metric.bleu_util", false], [229, "module-neural_compressor.metric.coco_label_map", false], [230, "module-neural_compressor.metric.coco_tools", false], [231, "module-neural_compressor.metric.evaluate_squad", false], [232, "module-neural_compressor.metric.f1", false], [233, "module-neural_compressor.metric", false], [234, "module-neural_compressor.metric.metric", false], [235, "module-neural_compressor.mix_precision", false], [236, "module-neural_compressor.model.base_model", false], [237, "module-neural_compressor.model", false], [238, "module-neural_compressor.model.keras_model", false], [239, "module-neural_compressor.model.model", false], [240, "module-neural_compressor.model.mxnet_model", false], [241, "module-neural_compressor.model.nets_factory", false], [242, "module-neural_compressor.model.onnx_model", false], [243, "module-neural_compressor.model.tensorflow_model", false], [244, "module-neural_compressor.model.torch_model", false], [245, "module-neural_compressor.objective", false], [246, "module-neural_compressor.profiling", false], [247, "module-neural_compressor.profiling.parser.factory", false], [248, "module-neural_compressor.profiling.parser.onnx_parser.factory", false], [249, "module-neural_compressor.profiling.parser.onnx_parser.parser", false], [250, "module-neural_compressor.profiling.parser.parser", false], [251, "module-neural_compressor.profiling.parser.result", false], [252, "module-neural_compressor.profiling.parser.tensorflow_parser.factory", false], [253, "module-neural_compressor.profiling.parser.tensorflow_parser.parser", false], [254, "module-neural_compressor.profiling.profiler.factory", false], [255, "module-neural_compressor.profiling.profiler.onnxrt_profiler.factory", false], [256, "module-neural_compressor.profiling.profiler.onnxrt_profiler.profiler", false], [257, 
"module-neural_compressor.profiling.profiler.onnxrt_profiler.utils", false], [258, "module-neural_compressor.profiling.profiler.profiler", false], [259, "module-neural_compressor.profiling.profiler.tensorflow_profiler.factory", false], [260, "module-neural_compressor.profiling.profiler.tensorflow_profiler.profiler", false], [261, "module-neural_compressor.profiling.profiler.tensorflow_profiler.utils", false], [262, "module-neural_compressor.quantization", false], [263, "module-neural_compressor.strategy.auto", false], [264, "module-neural_compressor.strategy.auto_mixed_precision", false], [265, "module-neural_compressor.strategy.basic", false], [266, "module-neural_compressor.strategy.bayesian", false], [267, "module-neural_compressor.strategy.conservative", false], [268, "module-neural_compressor.strategy.exhaustive", false], [269, "module-neural_compressor.strategy.hawq_v2", false], [270, "module-neural_compressor.strategy", false], [271, "module-neural_compressor.strategy.mse", false], [272, "module-neural_compressor.strategy.mse_v2", false], [273, "module-neural_compressor.strategy.random", false], [274, "module-neural_compressor.strategy.strategy", false], [275, "module-neural_compressor.strategy.utils.constant", false], [276, "module-neural_compressor.strategy.utils", false], [277, "module-neural_compressor.strategy.utils.tuning_sampler", false], [278, "module-neural_compressor.strategy.utils.tuning_space", false], [279, "module-neural_compressor.strategy.utils.tuning_structs", false], [280, "module-neural_compressor.strategy.utils.utility", false], [281, "module-neural_compressor.template.api_doc_example", false], [282, "module-neural_compressor.tensorflow.algorithms", false], [283, "module-neural_compressor.tensorflow.algorithms.smoother.calibration", false], [284, "module-neural_compressor.tensorflow.algorithms.smoother.core", false], [285, "module-neural_compressor.tensorflow.algorithms.smoother", false], [286, "module-neural_compressor.tensorflow.algorithms.smoother.scaler", false], [287, "module-neural_compressor.tensorflow.algorithms.static_quant", false], [288, "module-neural_compressor.tensorflow.algorithms.static_quant.keras", false], [289, "module-neural_compressor.tensorflow.algorithms.static_quant.tensorflow", false], [290, "module-neural_compressor.tensorflow", false], [291, "module-neural_compressor.tensorflow.keras", false], [292, "module-neural_compressor.tensorflow.keras.layers.conv2d", false], [293, "module-neural_compressor.tensorflow.keras.layers.dense", false], [294, "module-neural_compressor.tensorflow.keras.layers.depthwise_conv2d", false], [295, "module-neural_compressor.tensorflow.keras.layers", false], [296, "module-neural_compressor.tensorflow.keras.layers.layer_initializer", false], [297, "module-neural_compressor.tensorflow.keras.layers.pool2d", false], [298, "module-neural_compressor.tensorflow.keras.layers.separable_conv2d", false], [299, "module-neural_compressor.tensorflow.keras.quantization.config", false], [300, "module-neural_compressor.tensorflow.keras.quantization", false], [301, "module-neural_compressor.tensorflow.quantization.algorithm_entry", false], [302, "module-neural_compressor.tensorflow.quantization.autotune", false], [303, "module-neural_compressor.tensorflow.quantization.config", false], [304, "module-neural_compressor.tensorflow.quantization", false], [305, "module-neural_compressor.tensorflow.quantization.quantize", false], [306, "module-neural_compressor.tensorflow.quantization.utils.graph_converter", false], [307, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert", false], [308, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer", false], [309, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16", false], [310, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd", false], [311, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout", false], [312, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu", false], [313, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random", false], [314, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const", false], [315, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction", false], [316, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd", false], [317, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer", false], [318, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape", false], [319, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm", false], [320, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant", false], [321, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add", false], [322, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul", false], [323, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math", false], [324, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn", false], [325, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in", false], [326, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu", false], [327, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm", false], [328, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv", false], [329, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false], [330, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose", false], [331, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer", false], [332, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass", false], [333, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic", false], [334, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node", false], [335, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu", false], [336, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize", false], [337, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes", false], [338, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm", false], [339, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input", false], [340, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes", false], [341, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes", false], [342, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer", false], [343, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base", false], [344, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter", false], [345, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant", false], [346, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value", false], [347, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false], [348, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize", false], [349, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false], [350, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize", false], [351, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8", false], [352, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer", false], [353, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter", false], [354, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse", false], [355, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation", false], [356, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq", false], [357, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern", false], [358, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq", false], [359, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern", false], [360, "module-neural_compressor.tensorflow.quantization.utils.graph_util", false], [361, "module-neural_compressor.tensorflow.quantization.utils", false], [362, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph", false], [363, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn", false], [364, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2", false], [365, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv", false], [366, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv", false], [367, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in", false], [368, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul", false], [369, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling", false], [370, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq", false], [371, 
"module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq", false], [372, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base", false], [373, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn", false], [374, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2", false], [375, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv", false], [376, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu", false], [377, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul", false], [378, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling", false], [379, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph_common", false], [380, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction", false], [381, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base", false], [382, "module-neural_compressor.tensorflow.quantization.utils.transform_graph", false], [383, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging", false], [384, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat", false], [385, "module-neural_compressor.tensorflow.quantization.utils.utility", false], [386, "module-neural_compressor.tensorflow.utils.constants", false], [387, "module-neural_compressor.tensorflow.utils.data", false], [388, "module-neural_compressor.tensorflow.utils", false], [389, "module-neural_compressor.tensorflow.utils.model", false], [390, "module-neural_compressor.tensorflow.utils.model_wrappers", false], [391, "module-neural_compressor.tensorflow.utils.utility", false], [392, "module-neural_compressor.torch.algorithms.base_algorithm", false], [393, "module-neural_compressor.torch.algorithms.fp8_quant.utils.logger", false], [394, "module-neural_compressor.torch.algorithms", false], [395, "module-neural_compressor.torch.algorithms.layer_wise", false], [396, "module-neural_compressor.torch.algorithms.layer_wise.load", false], [397, "module-neural_compressor.torch.algorithms.layer_wise.modified_pickle", false], [398, "module-neural_compressor.torch.algorithms.layer_wise.utils", false], [399, "module-neural_compressor.torch.algorithms.mixed_precision.half_precision_convert", false], [400, "module-neural_compressor.torch.algorithms.mixed_precision", false], [401, "module-neural_compressor.torch.algorithms.mixed_precision.module_wrappers", false], [402, "module-neural_compressor.torch.algorithms.mx_quant", false], [403, "module-neural_compressor.torch.algorithms.mx_quant.mx", false], [404, "module-neural_compressor.torch.algorithms.mx_quant.utils", false], [405, "module-neural_compressor.torch.algorithms.pt2e_quant.core", false], [406, "module-neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter", false], [407, "module-neural_compressor.torch.algorithms.pt2e_quant", false], [408, "module-neural_compressor.torch.algorithms.pt2e_quant.save_load", false], [409, "module-neural_compressor.torch.algorithms.pt2e_quant.utility", false], [410, "module-neural_compressor.torch.algorithms.smooth_quant", false], [411, "module-neural_compressor.torch.algorithms.smooth_quant.save_load", false], [412, 
"module-neural_compressor.torch.algorithms.smooth_quant.smooth_quant", false], [413, "module-neural_compressor.torch.algorithms.smooth_quant.utility", false], [414, "module-neural_compressor.torch.algorithms.static_quant", false], [415, "module-neural_compressor.torch.algorithms.static_quant.save_load", false], [416, "module-neural_compressor.torch.algorithms.static_quant.static_quant", false], [417, "module-neural_compressor.torch.algorithms.static_quant.utility", false], [418, "module-neural_compressor.torch.algorithms.weight_only.autoround", false], [419, "module-neural_compressor.torch.algorithms.weight_only.awq", false], [420, "module-neural_compressor.torch.algorithms.weight_only.gptq", false], [421, "module-neural_compressor.torch.algorithms.weight_only.hqq.bitpack", false], [422, "module-neural_compressor.torch.algorithms.weight_only.hqq.config", false], [423, "module-neural_compressor.torch.algorithms.weight_only.hqq.core", false], [424, "module-neural_compressor.torch.algorithms.weight_only.hqq", false], [425, "module-neural_compressor.torch.algorithms.weight_only.hqq.optimizer", false], [426, "module-neural_compressor.torch.algorithms.weight_only.hqq.qtensor", false], [427, "module-neural_compressor.torch.algorithms.weight_only.hqq.quantizer", false], [428, "module-neural_compressor.torch.algorithms.weight_only", false], [429, "module-neural_compressor.torch.algorithms.weight_only.modules", false], [430, "module-neural_compressor.torch.algorithms.weight_only.rtn", false], [431, "module-neural_compressor.torch.algorithms.weight_only.save_load", false], [432, "module-neural_compressor.torch.algorithms.weight_only.teq", false], [433, "module-neural_compressor.torch.algorithms.weight_only.utility", false], [434, "module-neural_compressor.torch.export", false], [435, "module-neural_compressor.torch.export.pt2e_export", false], [436, "module-neural_compressor.torch", false], [437, "module-neural_compressor.torch.quantization.algorithm_entry", false], [438, "module-neural_compressor.torch.quantization.autotune", false], [439, "module-neural_compressor.torch.quantization.config", false], [440, "module-neural_compressor.torch.quantization", false], [441, "module-neural_compressor.torch.quantization.load_entry", false], [442, "module-neural_compressor.torch.quantization.quantize", false], [443, "module-neural_compressor.torch.utils.auto_accelerator", false], [444, "module-neural_compressor.torch.utils.bit_packer", false], [445, "module-neural_compressor.torch.utils.constants", false], [446, "module-neural_compressor.torch.utils.environ", false], [447, "module-neural_compressor.torch.utils", false], [448, "module-neural_compressor.torch.utils.utility", false], [449, "module-neural_compressor.training", false], [450, "module-neural_compressor.transformers.quantization.utils", false], [451, "module-neural_compressor.transformers.utils", false], [452, "module-neural_compressor.transformers.utils.quantization_config", false], [453, "module-neural_compressor.utils.collect_layer_histogram", false], [454, "module-neural_compressor.utils.constant", false], [455, "module-neural_compressor.utils.create_obj_from_config", false], [456, "module-neural_compressor.utils.export", false], [457, "module-neural_compressor.utils.export.qlinear2qdq", false], [458, "module-neural_compressor.utils.export.tf2onnx", false], [459, "module-neural_compressor.utils.export.torch2onnx", false], [460, "module-neural_compressor.utils", false], [461, "module-neural_compressor.utils.kl_divergence", false], [462, 
"module-neural_compressor.utils.load_huggingface", false], [463, "module-neural_compressor.utils.logger", false], [464, "module-neural_compressor.utils.options", false], [465, "module-neural_compressor.utils.pytorch", false], [466, "module-neural_compressor.utils.utility", false], [467, "module-neural_compressor.utils.weights_details", false], [468, "module-neural_compressor.version", false]], "module_debug_level1 (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.module_debug_level1", false]], "modules (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.modules", false]], "modules (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.modules", false]], "modules (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.modules", false]], "modules (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.modules", false]], "modules (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.modules", false]], "modules (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.modules", false]], "move_input_device() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.move_input_device", false]], "move_input_to_device() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.move_input_to_device", false]], "move_input_to_device() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.move_input_to_device", false]], "movesqueezeafterreluoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu)": [[63, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu.MoveSqueezeAfterReluOptimizer", false]], "movesqueezeafterreluoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu)": [[335, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu.MoveSqueezeAfterReluOptimizer", false]], "mse (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.MSE", false]], "mse (neural_compressor.metric.metric.rmse attribute)": [[234, "neural_compressor.metric.metric.RMSE.mse", false]], "mse_metric_gap() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.mse_metric_gap", false]], "mse_v2tunestrategy (class in neural_compressor.strategy.mse_v2)": [[272, "neural_compressor.strategy.mse_v2.MSE_V2TuneStrategy", false]], "msetunestrategy (class in neural_compressor.strategy.mse)": [[271, "neural_compressor.strategy.mse.MSETuneStrategy", false]], "mullinear (class in neural_compressor.adaptor.torch_utils.model_wrapper)": [[142, "neural_compressor.adaptor.torch_utils.model_wrapper.MulLinear", false]], "mullinear (class in 
neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.MulLinear", false]], "multiobjective (class in neural_compressor.objective)": [[245, "neural_compressor.objective.MultiObjective", false]], "mx_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.mx_quant_entry", false]], "mxlinear (class in neural_compressor.torch.algorithms.mx_quant.mx)": [[403, "neural_compressor.torch.algorithms.mx_quant.mx.MXLinear", false]], "mxnet (class in neural_compressor.config)": [[195, "neural_compressor.config.MXNet", false]], "mxnetcifar10 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetCIFAR10", false]], "mxnetcifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetCIFAR100", false]], "mxnetcropresizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetCropResizeTransform", false]], "mxnetcroptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetCropToBoundingBox", false]], "mxnetdataloader (class in neural_compressor.data.dataloaders.mxnet_dataloader)": [[204, "neural_compressor.data.dataloaders.mxnet_dataloader.MXNetDataLoader", false]], "mxnetdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetDatasets", false]], "mxnetfashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetFashionMNIST", false]], "mxnetfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.MXNetFilters", false]], "mxnetimagefolder (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetImageFolder", false]], "mxnetimagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.MXNetImagenetRaw", false]], "mxnetmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.MXNetMetrics", false]], "mxnetmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetMNIST", false]], "mxnetmodel (class in neural_compressor.model.mxnet_model)": [[240, "neural_compressor.model.mxnet_model.MXNetModel", false]], "mxnetnormalizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetNormalizeTransform", false]], "mxnettransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetTransforms", false]], "mxnettranspose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetTranspose", false]], "mxquantconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.MXQuantConfig", false]], "mxquantizer (class in neural_compressor.torch.algorithms.mx_quant.mx)": [[403, "neural_compressor.torch.algorithms.mx_quant.mx.MXQuantizer", false]], "n (neural_compressor.compression.pruner.patterns.mha.patternmha attribute)": [[177, "neural_compressor.compression.pruner.patterns.mha.PatternMHA.N", false]], "n 
(neural_compressor.compression.pruner.patterns.ninm.pytorchpatternninm attribute)": [[178, "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM.N", false]], "name (neural_compressor.common.base_config.baseconfig attribute)": [[152, "neural_compressor.common.base_config.BaseConfig.name", false]], "namecollector (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.NameCollector", false]], "nasconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.NASConfig", false]], "nbits (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.nbits", false]], "ndarray_to_device() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.ndarray_to_device", false]], "need_apply() (in module neural_compressor.tensorflow.quantization.quantize)": [[305, "neural_compressor.tensorflow.quantization.quantize.need_apply", false]], "need_apply() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.need_apply", false]], "neural_compressor": [[226, "module-neural_compressor", false]], "neural_compressor.adaptor.mxnet_utils": [[0, "module-neural_compressor.adaptor.mxnet_utils", false]], "neural_compressor.adaptor.mxnet_utils.util": [[1, "module-neural_compressor.adaptor.mxnet_utils.util", false]], "neural_compressor.adaptor.ox_utils": [[4, "module-neural_compressor.adaptor.ox_utils", false]], "neural_compressor.adaptor.ox_utils.calibration": [[2, "module-neural_compressor.adaptor.ox_utils.calibration", false]], "neural_compressor.adaptor.ox_utils.calibrator": [[3, "module-neural_compressor.adaptor.ox_utils.calibrator", false]], "neural_compressor.adaptor.ox_utils.operators": [[16, "module-neural_compressor.adaptor.ox_utils.operators", false]], "neural_compressor.adaptor.ox_utils.operators.activation": [[5, "module-neural_compressor.adaptor.ox_utils.operators.activation", false]], "neural_compressor.adaptor.ox_utils.operators.argmax": [[6, "module-neural_compressor.adaptor.ox_utils.operators.argmax", false]], "neural_compressor.adaptor.ox_utils.operators.attention": [[7, "module-neural_compressor.adaptor.ox_utils.operators.attention", false]], "neural_compressor.adaptor.ox_utils.operators.binary_op": [[8, "module-neural_compressor.adaptor.ox_utils.operators.binary_op", false]], "neural_compressor.adaptor.ox_utils.operators.concat": [[9, "module-neural_compressor.adaptor.ox_utils.operators.concat", false]], "neural_compressor.adaptor.ox_utils.operators.conv": [[10, "module-neural_compressor.adaptor.ox_utils.operators.conv", false]], "neural_compressor.adaptor.ox_utils.operators.direct_q8": [[11, "module-neural_compressor.adaptor.ox_utils.operators.direct_q8", false]], "neural_compressor.adaptor.ox_utils.operators.embed_layernorm": [[12, "module-neural_compressor.adaptor.ox_utils.operators.embed_layernorm", false]], "neural_compressor.adaptor.ox_utils.operators.gather": [[13, "module-neural_compressor.adaptor.ox_utils.operators.gather", false]], "neural_compressor.adaptor.ox_utils.operators.gavgpool": [[14, "module-neural_compressor.adaptor.ox_utils.operators.gavgpool", false]], "neural_compressor.adaptor.ox_utils.operators.gemm": [[15, "module-neural_compressor.adaptor.ox_utils.operators.gemm", false]], "neural_compressor.adaptor.ox_utils.operators.lstm": [[17, 
"module-neural_compressor.adaptor.ox_utils.operators.lstm", false]], "neural_compressor.adaptor.ox_utils.operators.matmul": [[18, "module-neural_compressor.adaptor.ox_utils.operators.matmul", false]], "neural_compressor.adaptor.ox_utils.operators.maxpool": [[19, "module-neural_compressor.adaptor.ox_utils.operators.maxpool", false]], "neural_compressor.adaptor.ox_utils.operators.norm": [[20, "module-neural_compressor.adaptor.ox_utils.operators.norm", false]], "neural_compressor.adaptor.ox_utils.operators.ops": [[21, "module-neural_compressor.adaptor.ox_utils.operators.ops", false]], "neural_compressor.adaptor.ox_utils.operators.pad": [[22, "module-neural_compressor.adaptor.ox_utils.operators.pad", false]], "neural_compressor.adaptor.ox_utils.operators.pooling": [[23, "module-neural_compressor.adaptor.ox_utils.operators.pooling", false]], "neural_compressor.adaptor.ox_utils.operators.reduce": [[24, "module-neural_compressor.adaptor.ox_utils.operators.reduce", false]], "neural_compressor.adaptor.ox_utils.operators.resize": [[25, "module-neural_compressor.adaptor.ox_utils.operators.resize", false]], "neural_compressor.adaptor.ox_utils.operators.split": [[26, "module-neural_compressor.adaptor.ox_utils.operators.split", false]], "neural_compressor.adaptor.ox_utils.operators.unary_op": [[27, "module-neural_compressor.adaptor.ox_utils.operators.unary_op", false]], "neural_compressor.adaptor.ox_utils.quantizer": [[28, "module-neural_compressor.adaptor.ox_utils.quantizer", false]], "neural_compressor.adaptor.ox_utils.smooth_quant": [[29, "module-neural_compressor.adaptor.ox_utils.smooth_quant", false]], "neural_compressor.adaptor.ox_utils.util": [[30, "module-neural_compressor.adaptor.ox_utils.util", false]], "neural_compressor.adaptor.ox_utils.weight_only": [[31, "module-neural_compressor.adaptor.ox_utils.weight_only", false]], "neural_compressor.adaptor.tensorflow": [[32, "module-neural_compressor.adaptor.tensorflow", false]], "neural_compressor.adaptor.tf_utils": [[96, "module-neural_compressor.adaptor.tf_utils", false]], "neural_compressor.adaptor.tf_utils.graph_converter": [[33, "module-neural_compressor.adaptor.tf_utils.graph_converter", false]], "neural_compressor.adaptor.tf_utils.graph_converter_without_calib": [[34, "module-neural_compressor.adaptor.tf_utils.graph_converter_without_calib", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter": [[72, "module-neural_compressor.adaptor.tf_utils.graph_rewriter", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16": [[37, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert": [[35, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[36, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic": [[61, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd": [[38, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout": [[39, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu": [[40, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random": [[41, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const": [[42, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction": [[43, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd": [[44, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer": [[45, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape": [[46, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm": [[47, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant": [[48, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add": [[49, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul": [[50, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math": [[51, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn": [[52, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in": [[53, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu": [[54, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm": [[55, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv": [[56, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[57, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose": [[58, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer": [[59, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass": [[60, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node": [[62, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu": [[63, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize": [[64, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes": [[65, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm": [[66, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input": [[67, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes": [[68, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes": [[69, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer": [[70, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base": [[71, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8": [[80, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant": [[73, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value": [[74, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib": [[75, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[76, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize": [[77, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[78, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize": [[79, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize", false]], 
"neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer": [[81, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter": [[82, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse": [[83, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert": [[84, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation": [[85, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx": [[86, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph": [[87, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node": [[88, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema": [[89, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils": [[90, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq": [[91, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern": [[92, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq": [[93, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern": [[94, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern", false]], "neural_compressor.adaptor.tf_utils.graph_util": [[95, "module-neural_compressor.adaptor.tf_utils.graph_util", false]], "neural_compressor.adaptor.tf_utils.quantize_graph": [[97, "module-neural_compressor.adaptor.tf_utils.quantize_graph", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat": [[99, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize": [[98, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config": [[100, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper": [[101, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers": [[102, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer": [[103, 
"module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add": [[104, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base": [[105, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn": [[106, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper": [[107, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq": [[115, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn": [[108, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2": [[109, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv": [[110, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv": [[111, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in": [[112, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul": [[113, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling": [[114, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq": [[116, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base": [[117, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn": [[118, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2": [[119, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv": [[120, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu": [[121, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul": [[122, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling": [[123, 
"module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling", false]], "neural_compressor.adaptor.tf_utils.quantize_graph_common": [[124, "module-neural_compressor.adaptor.tf_utils.quantize_graph_common", false]], "neural_compressor.adaptor.tf_utils.smooth_quant_calibration": [[125, "module-neural_compressor.adaptor.tf_utils.smooth_quant_calibration", false]], "neural_compressor.adaptor.tf_utils.smooth_quant_scaler": [[126, "module-neural_compressor.adaptor.tf_utils.smooth_quant_scaler", false]], "neural_compressor.adaptor.tf_utils.tf2onnx_converter": [[127, "module-neural_compressor.adaptor.tf_utils.tf2onnx_converter", false]], "neural_compressor.adaptor.tf_utils.transform_graph": [[130, "module-neural_compressor.adaptor.tf_utils.transform_graph", false]], "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction": [[128, "module-neural_compressor.adaptor.tf_utils.transform_graph.bias_correction", false]], "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base": [[129, "module-neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base", false]], "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging": [[131, "module-neural_compressor.adaptor.tf_utils.transform_graph.insert_logging", false]], "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat": [[132, "module-neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat", false]], "neural_compressor.adaptor.tf_utils.util": [[133, "module-neural_compressor.adaptor.tf_utils.util", false]], "neural_compressor.adaptor.torch_utils": [[136, "module-neural_compressor.adaptor.torch_utils", false]], "neural_compressor.adaptor.torch_utils.bf16_convert": [[134, "module-neural_compressor.adaptor.torch_utils.bf16_convert", false]], "neural_compressor.adaptor.torch_utils.hawq_metric": [[135, "module-neural_compressor.adaptor.torch_utils.hawq_metric", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant": [[137, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle": [[138, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize": [[139, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load": [[140, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils": [[141, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.utils", false]], "neural_compressor.adaptor.torch_utils.model_wrapper": [[142, "module-neural_compressor.adaptor.torch_utils.model_wrapper", false]], "neural_compressor.adaptor.torch_utils.pattern_detector": [[143, "module-neural_compressor.adaptor.torch_utils.pattern_detector", false]], "neural_compressor.adaptor.torch_utils.symbolic_trace": [[144, "module-neural_compressor.adaptor.torch_utils.symbolic_trace", false]], "neural_compressor.adaptor.torch_utils.util": [[145, "module-neural_compressor.adaptor.torch_utils.util", false]], "neural_compressor.algorithm": [[148, "module-neural_compressor.algorithm", false]], "neural_compressor.algorithm.algorithm": [[146, "module-neural_compressor.algorithm.algorithm", false]], "neural_compressor.algorithm.fast_bias_correction": [[147, "module-neural_compressor.algorithm.fast_bias_correction", 
false]], "neural_compressor.algorithm.smooth_quant": [[149, "module-neural_compressor.algorithm.smooth_quant", false]], "neural_compressor.algorithm.weight_correction": [[150, "module-neural_compressor.algorithm.weight_correction", false]], "neural_compressor.benchmark": [[151, "module-neural_compressor.benchmark", false]], "neural_compressor.common": [[155, "module-neural_compressor.common", false]], "neural_compressor.common.base_config": [[152, "module-neural_compressor.common.base_config", false]], "neural_compressor.common.base_tuning": [[153, "module-neural_compressor.common.base_tuning", false]], "neural_compressor.common.benchmark": [[154, "module-neural_compressor.common.benchmark", false]], "neural_compressor.common.tuning_param": [[156, "module-neural_compressor.common.tuning_param", false]], "neural_compressor.common.utils": [[158, "module-neural_compressor.common.utils", false]], "neural_compressor.common.utils.constants": [[157, "module-neural_compressor.common.utils.constants", false]], "neural_compressor.common.utils.logger": [[159, "module-neural_compressor.common.utils.logger", false]], "neural_compressor.common.utils.save_load": [[160, "module-neural_compressor.common.utils.save_load", false]], "neural_compressor.common.utils.utility": [[161, "module-neural_compressor.common.utils.utility", false]], "neural_compressor.compression.callbacks": [[162, "module-neural_compressor.compression.callbacks", false]], "neural_compressor.compression.distillation": [[164, "module-neural_compressor.compression.distillation", false]], "neural_compressor.compression.distillation.criterions": [[163, "module-neural_compressor.compression.distillation.criterions", false]], "neural_compressor.compression.distillation.optimizers": [[165, "module-neural_compressor.compression.distillation.optimizers", false]], "neural_compressor.compression.distillation.utility": [[166, "module-neural_compressor.compression.distillation.utility", false]], "neural_compressor.compression.hpo": [[167, "module-neural_compressor.compression.hpo", false]], "neural_compressor.compression.hpo.sa_optimizer": [[168, "module-neural_compressor.compression.hpo.sa_optimizer", false]], "neural_compressor.compression.pruner": [[170, "module-neural_compressor.compression.pruner", false]], "neural_compressor.compression.pruner.criteria": [[169, "module-neural_compressor.compression.pruner.criteria", false]], "neural_compressor.compression.pruner.model_slim": [[172, "module-neural_compressor.compression.pruner.model_slim", false]], "neural_compressor.compression.pruner.model_slim.auto_slim": [[171, "module-neural_compressor.compression.pruner.model_slim.auto_slim", false]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer": [[173, "module-neural_compressor.compression.pruner.model_slim.pattern_analyzer", false]], "neural_compressor.compression.pruner.model_slim.weight_slim": [[174, "module-neural_compressor.compression.pruner.model_slim.weight_slim", false]], "neural_compressor.compression.pruner.patterns": [[176, "module-neural_compressor.compression.pruner.patterns", false]], "neural_compressor.compression.pruner.patterns.base": [[175, "module-neural_compressor.compression.pruner.patterns.base", false]], "neural_compressor.compression.pruner.patterns.mha": [[177, "module-neural_compressor.compression.pruner.patterns.mha", false]], "neural_compressor.compression.pruner.patterns.ninm": [[178, "module-neural_compressor.compression.pruner.patterns.ninm", false]], 
"neural_compressor.compression.pruner.patterns.nxm": [[179, "module-neural_compressor.compression.pruner.patterns.nxm", false]], "neural_compressor.compression.pruner.pruners": [[183, "module-neural_compressor.compression.pruner.pruners", false]], "neural_compressor.compression.pruner.pruners.base": [[180, "module-neural_compressor.compression.pruner.pruners.base", false]], "neural_compressor.compression.pruner.pruners.basic": [[181, "module-neural_compressor.compression.pruner.pruners.basic", false]], "neural_compressor.compression.pruner.pruners.block_mask": [[182, "module-neural_compressor.compression.pruner.pruners.block_mask", false]], "neural_compressor.compression.pruner.pruners.mha": [[184, "module-neural_compressor.compression.pruner.pruners.mha", false]], "neural_compressor.compression.pruner.pruners.pattern_lock": [[185, "module-neural_compressor.compression.pruner.pruners.pattern_lock", false]], "neural_compressor.compression.pruner.pruners.progressive": [[186, "module-neural_compressor.compression.pruner.pruners.progressive", false]], "neural_compressor.compression.pruner.pruners.retrain_free": [[187, "module-neural_compressor.compression.pruner.pruners.retrain_free", false]], "neural_compressor.compression.pruner.pruning": [[188, "module-neural_compressor.compression.pruner.pruning", false]], "neural_compressor.compression.pruner.regs": [[189, "module-neural_compressor.compression.pruner.regs", false]], "neural_compressor.compression.pruner.schedulers": [[190, "module-neural_compressor.compression.pruner.schedulers", false]], "neural_compressor.compression.pruner.tf_criteria": [[191, "module-neural_compressor.compression.pruner.tf_criteria", false]], "neural_compressor.compression.pruner.utils": [[192, "module-neural_compressor.compression.pruner.utils", false]], "neural_compressor.compression.pruner.wanda": [[193, "module-neural_compressor.compression.pruner.wanda", false]], "neural_compressor.compression.pruner.wanda.utils": [[194, "module-neural_compressor.compression.pruner.wanda.utils", false]], "neural_compressor.config": [[195, "module-neural_compressor.config", false]], "neural_compressor.contrib": [[196, "module-neural_compressor.contrib", false]], "neural_compressor.contrib.strategy": [[197, "module-neural_compressor.contrib.strategy", false]], "neural_compressor.contrib.strategy.sigopt": [[198, "module-neural_compressor.contrib.strategy.sigopt", false]], "neural_compressor.contrib.strategy.tpe": [[199, "module-neural_compressor.contrib.strategy.tpe", false]], "neural_compressor.data": [[220, "module-neural_compressor.data", false]], "neural_compressor.data.dataloaders.base_dataloader": [[200, "module-neural_compressor.data.dataloaders.base_dataloader", false]], "neural_compressor.data.dataloaders.dataloader": [[201, "module-neural_compressor.data.dataloaders.dataloader", false]], "neural_compressor.data.dataloaders.default_dataloader": [[202, "module-neural_compressor.data.dataloaders.default_dataloader", false]], "neural_compressor.data.dataloaders.fetcher": [[203, "module-neural_compressor.data.dataloaders.fetcher", false]], "neural_compressor.data.dataloaders.mxnet_dataloader": [[204, "module-neural_compressor.data.dataloaders.mxnet_dataloader", false]], "neural_compressor.data.dataloaders.onnxrt_dataloader": [[205, "module-neural_compressor.data.dataloaders.onnxrt_dataloader", false]], "neural_compressor.data.dataloaders.pytorch_dataloader": [[206, "module-neural_compressor.data.dataloaders.pytorch_dataloader", false]], 
"neural_compressor.data.dataloaders.sampler": [[207, "module-neural_compressor.data.dataloaders.sampler", false]], "neural_compressor.data.dataloaders.tensorflow_dataloader": [[208, "module-neural_compressor.data.dataloaders.tensorflow_dataloader", false]], "neural_compressor.data.datasets": [[215, "module-neural_compressor.data.datasets", false]], "neural_compressor.data.datasets.bert_dataset": [[209, "module-neural_compressor.data.datasets.bert_dataset", false]], "neural_compressor.data.datasets.coco_dataset": [[210, "module-neural_compressor.data.datasets.coco_dataset", false]], "neural_compressor.data.datasets.dataset": [[211, "module-neural_compressor.data.datasets.dataset", false]], "neural_compressor.data.datasets.dummy_dataset": [[212, "module-neural_compressor.data.datasets.dummy_dataset", false]], "neural_compressor.data.datasets.dummy_dataset_v2": [[213, "module-neural_compressor.data.datasets.dummy_dataset_v2", false]], "neural_compressor.data.datasets.imagenet_dataset": [[214, "module-neural_compressor.data.datasets.imagenet_dataset", false]], "neural_compressor.data.datasets.style_transfer_dataset": [[216, "module-neural_compressor.data.datasets.style_transfer_dataset", false]], "neural_compressor.data.filters": [[219, "module-neural_compressor.data.filters", false]], "neural_compressor.data.filters.coco_filter": [[217, "module-neural_compressor.data.filters.coco_filter", false]], "neural_compressor.data.filters.filter": [[218, "module-neural_compressor.data.filters.filter", false]], "neural_compressor.data.transforms": [[222, "module-neural_compressor.data.transforms", false]], "neural_compressor.data.transforms.imagenet_transform": [[221, "module-neural_compressor.data.transforms.imagenet_transform", false]], "neural_compressor.data.transforms.postprocess": [[223, "module-neural_compressor.data.transforms.postprocess", false]], "neural_compressor.data.transforms.tokenization": [[224, "module-neural_compressor.data.transforms.tokenization", false]], "neural_compressor.data.transforms.transform": [[225, "module-neural_compressor.data.transforms.transform", false]], "neural_compressor.metric": [[233, "module-neural_compressor.metric", false]], "neural_compressor.metric.bleu": [[227, "module-neural_compressor.metric.bleu", false]], "neural_compressor.metric.bleu_util": [[228, "module-neural_compressor.metric.bleu_util", false]], "neural_compressor.metric.coco_label_map": [[229, "module-neural_compressor.metric.coco_label_map", false]], "neural_compressor.metric.coco_tools": [[230, "module-neural_compressor.metric.coco_tools", false]], "neural_compressor.metric.evaluate_squad": [[231, "module-neural_compressor.metric.evaluate_squad", false]], "neural_compressor.metric.f1": [[232, "module-neural_compressor.metric.f1", false]], "neural_compressor.metric.metric": [[234, "module-neural_compressor.metric.metric", false]], "neural_compressor.mix_precision": [[235, "module-neural_compressor.mix_precision", false]], "neural_compressor.model": [[237, "module-neural_compressor.model", false]], "neural_compressor.model.base_model": [[236, "module-neural_compressor.model.base_model", false]], "neural_compressor.model.keras_model": [[238, "module-neural_compressor.model.keras_model", false]], "neural_compressor.model.model": [[239, "module-neural_compressor.model.model", false]], "neural_compressor.model.mxnet_model": [[240, "module-neural_compressor.model.mxnet_model", false]], "neural_compressor.model.nets_factory": [[241, "module-neural_compressor.model.nets_factory", false]], 
"neural_compressor.model.onnx_model": [[242, "module-neural_compressor.model.onnx_model", false]], "neural_compressor.model.tensorflow_model": [[243, "module-neural_compressor.model.tensorflow_model", false]], "neural_compressor.model.torch_model": [[244, "module-neural_compressor.model.torch_model", false]], "neural_compressor.objective": [[245, "module-neural_compressor.objective", false]], "neural_compressor.profiling": [[246, "module-neural_compressor.profiling", false]], "neural_compressor.profiling.parser.factory": [[247, "module-neural_compressor.profiling.parser.factory", false]], "neural_compressor.profiling.parser.onnx_parser.factory": [[248, "module-neural_compressor.profiling.parser.onnx_parser.factory", false]], "neural_compressor.profiling.parser.onnx_parser.parser": [[249, "module-neural_compressor.profiling.parser.onnx_parser.parser", false]], "neural_compressor.profiling.parser.parser": [[250, "module-neural_compressor.profiling.parser.parser", false]], "neural_compressor.profiling.parser.result": [[251, "module-neural_compressor.profiling.parser.result", false]], "neural_compressor.profiling.parser.tensorflow_parser.factory": [[252, "module-neural_compressor.profiling.parser.tensorflow_parser.factory", false]], "neural_compressor.profiling.parser.tensorflow_parser.parser": [[253, "module-neural_compressor.profiling.parser.tensorflow_parser.parser", false]], "neural_compressor.profiling.profiler.factory": [[254, "module-neural_compressor.profiling.profiler.factory", false]], "neural_compressor.profiling.profiler.onnxrt_profiler.factory": [[255, "module-neural_compressor.profiling.profiler.onnxrt_profiler.factory", false]], "neural_compressor.profiling.profiler.onnxrt_profiler.profiler": [[256, "module-neural_compressor.profiling.profiler.onnxrt_profiler.profiler", false]], "neural_compressor.profiling.profiler.onnxrt_profiler.utils": [[257, "module-neural_compressor.profiling.profiler.onnxrt_profiler.utils", false]], "neural_compressor.profiling.profiler.profiler": [[258, "module-neural_compressor.profiling.profiler.profiler", false]], "neural_compressor.profiling.profiler.tensorflow_profiler.factory": [[259, "module-neural_compressor.profiling.profiler.tensorflow_profiler.factory", false]], "neural_compressor.profiling.profiler.tensorflow_profiler.profiler": [[260, "module-neural_compressor.profiling.profiler.tensorflow_profiler.profiler", false]], "neural_compressor.profiling.profiler.tensorflow_profiler.utils": [[261, "module-neural_compressor.profiling.profiler.tensorflow_profiler.utils", false]], "neural_compressor.quantization": [[262, "module-neural_compressor.quantization", false]], "neural_compressor.strategy": [[270, "module-neural_compressor.strategy", false]], "neural_compressor.strategy.auto": [[263, "module-neural_compressor.strategy.auto", false]], "neural_compressor.strategy.auto_mixed_precision": [[264, "module-neural_compressor.strategy.auto_mixed_precision", false]], "neural_compressor.strategy.basic": [[265, "module-neural_compressor.strategy.basic", false]], "neural_compressor.strategy.bayesian": [[266, "module-neural_compressor.strategy.bayesian", false]], "neural_compressor.strategy.conservative": [[267, "module-neural_compressor.strategy.conservative", false]], "neural_compressor.strategy.exhaustive": [[268, "module-neural_compressor.strategy.exhaustive", false]], "neural_compressor.strategy.hawq_v2": [[269, "module-neural_compressor.strategy.hawq_v2", false]], "neural_compressor.strategy.mse": [[271, "module-neural_compressor.strategy.mse", 
false]], "neural_compressor.strategy.mse_v2": [[272, "module-neural_compressor.strategy.mse_v2", false]], "neural_compressor.strategy.random": [[273, "module-neural_compressor.strategy.random", false]], "neural_compressor.strategy.strategy": [[274, "module-neural_compressor.strategy.strategy", false]], "neural_compressor.strategy.utils": [[276, "module-neural_compressor.strategy.utils", false]], "neural_compressor.strategy.utils.constant": [[275, "module-neural_compressor.strategy.utils.constant", false]], "neural_compressor.strategy.utils.tuning_sampler": [[277, "module-neural_compressor.strategy.utils.tuning_sampler", false]], "neural_compressor.strategy.utils.tuning_space": [[278, "module-neural_compressor.strategy.utils.tuning_space", false]], "neural_compressor.strategy.utils.tuning_structs": [[279, "module-neural_compressor.strategy.utils.tuning_structs", false]], "neural_compressor.strategy.utils.utility": [[280, "module-neural_compressor.strategy.utils.utility", false]], "neural_compressor.template.api_doc_example": [[281, "module-neural_compressor.template.api_doc_example", false]], "neural_compressor.tensorflow": [[290, "module-neural_compressor.tensorflow", false]], "neural_compressor.tensorflow.algorithms": [[282, "module-neural_compressor.tensorflow.algorithms", false]], "neural_compressor.tensorflow.algorithms.smoother": [[285, "module-neural_compressor.tensorflow.algorithms.smoother", false]], "neural_compressor.tensorflow.algorithms.smoother.calibration": [[283, "module-neural_compressor.tensorflow.algorithms.smoother.calibration", false]], "neural_compressor.tensorflow.algorithms.smoother.core": [[284, "module-neural_compressor.tensorflow.algorithms.smoother.core", false]], "neural_compressor.tensorflow.algorithms.smoother.scaler": [[286, "module-neural_compressor.tensorflow.algorithms.smoother.scaler", false]], "neural_compressor.tensorflow.algorithms.static_quant": [[287, "module-neural_compressor.tensorflow.algorithms.static_quant", false]], "neural_compressor.tensorflow.algorithms.static_quant.keras": [[288, "module-neural_compressor.tensorflow.algorithms.static_quant.keras", false]], "neural_compressor.tensorflow.algorithms.static_quant.tensorflow": [[289, "module-neural_compressor.tensorflow.algorithms.static_quant.tensorflow", false]], "neural_compressor.tensorflow.keras": [[291, "module-neural_compressor.tensorflow.keras", false]], "neural_compressor.tensorflow.keras.layers": [[295, "module-neural_compressor.tensorflow.keras.layers", false]], "neural_compressor.tensorflow.keras.layers.conv2d": [[292, "module-neural_compressor.tensorflow.keras.layers.conv2d", false]], "neural_compressor.tensorflow.keras.layers.dense": [[293, "module-neural_compressor.tensorflow.keras.layers.dense", false]], "neural_compressor.tensorflow.keras.layers.depthwise_conv2d": [[294, "module-neural_compressor.tensorflow.keras.layers.depthwise_conv2d", false]], "neural_compressor.tensorflow.keras.layers.layer_initializer": [[296, "module-neural_compressor.tensorflow.keras.layers.layer_initializer", false]], "neural_compressor.tensorflow.keras.layers.pool2d": [[297, "module-neural_compressor.tensorflow.keras.layers.pool2d", false]], "neural_compressor.tensorflow.keras.layers.separable_conv2d": [[298, "module-neural_compressor.tensorflow.keras.layers.separable_conv2d", false]], "neural_compressor.tensorflow.keras.quantization": [[300, "module-neural_compressor.tensorflow.keras.quantization", false]], "neural_compressor.tensorflow.keras.quantization.config": [[299, 
"module-neural_compressor.tensorflow.keras.quantization.config", false]], "neural_compressor.tensorflow.quantization": [[304, "module-neural_compressor.tensorflow.quantization", false]], "neural_compressor.tensorflow.quantization.algorithm_entry": [[301, "module-neural_compressor.tensorflow.quantization.algorithm_entry", false]], "neural_compressor.tensorflow.quantization.autotune": [[302, "module-neural_compressor.tensorflow.quantization.autotune", false]], "neural_compressor.tensorflow.quantization.config": [[303, "module-neural_compressor.tensorflow.quantization.config", false]], "neural_compressor.tensorflow.quantization.quantize": [[305, "module-neural_compressor.tensorflow.quantization.quantize", false]], "neural_compressor.tensorflow.quantization.utils": [[361, "module-neural_compressor.tensorflow.quantization.utils", false]], "neural_compressor.tensorflow.quantization.utils.graph_converter": [[306, "module-neural_compressor.tensorflow.quantization.utils.graph_converter", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter": [[344, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16": [[309, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert": [[307, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[308, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic": [[333, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd": [[310, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout": [[311, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu": [[312, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random": [[313, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const": [[314, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction": [[315, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd": [[316, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer": [[317, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape": [[318, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm": [[319, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant": [[320, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add": [[321, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul": [[322, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math": [[323, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn": [[324, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in": [[325, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu": [[326, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm": [[327, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv": [[328, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[329, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose": [[330, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer": [[331, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass": [[332, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node": [[334, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu": [[335, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize": [[336, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes": [[337, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm": [[338, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input": [[339, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes": [[340, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes": [[341, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer": [[342, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base": [[343, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8": [[351, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant": [[345, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value": [[346, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[347, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize": [[348, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[349, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize": [[350, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer": [[352, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter": [[353, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse": [[354, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation": [[355, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq": [[356, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern": [[357, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq": [[358, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern": [[359, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern", false]], "neural_compressor.tensorflow.quantization.utils.graph_util": [[360, "module-neural_compressor.tensorflow.quantization.utils.graph_util", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph": [[362, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq": [[370, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn": [[363, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2": [[364, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv": [[365, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv": [[366, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in": [[367, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul": [[368, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling": [[369, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq": [[371, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base": [[372, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base", false]], 
"neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn": [[373, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2": [[374, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv": [[375, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu": [[376, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul": [[377, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling": [[378, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph_common": [[379, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph_common", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph": [[382, "module-neural_compressor.tensorflow.quantization.utils.transform_graph", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction": [[380, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base": [[381, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging": [[383, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat": [[384, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat", false]], "neural_compressor.tensorflow.quantization.utils.utility": [[385, "module-neural_compressor.tensorflow.quantization.utils.utility", false]], "neural_compressor.tensorflow.utils": [[388, "module-neural_compressor.tensorflow.utils", false]], "neural_compressor.tensorflow.utils.constants": [[386, "module-neural_compressor.tensorflow.utils.constants", false]], "neural_compressor.tensorflow.utils.data": [[387, "module-neural_compressor.tensorflow.utils.data", false]], "neural_compressor.tensorflow.utils.model": [[389, "module-neural_compressor.tensorflow.utils.model", false]], "neural_compressor.tensorflow.utils.model_wrappers": [[390, "module-neural_compressor.tensorflow.utils.model_wrappers", false]], "neural_compressor.tensorflow.utils.utility": [[391, "module-neural_compressor.tensorflow.utils.utility", false]], "neural_compressor.torch": [[436, "module-neural_compressor.torch", false]], "neural_compressor.torch.algorithms": [[394, "module-neural_compressor.torch.algorithms", false]], "neural_compressor.torch.algorithms.base_algorithm": [[392, "module-neural_compressor.torch.algorithms.base_algorithm", false]], "neural_compressor.torch.algorithms.fp8_quant.utils.logger": [[393, 
"module-neural_compressor.torch.algorithms.fp8_quant.utils.logger", false]], "neural_compressor.torch.algorithms.layer_wise": [[395, "module-neural_compressor.torch.algorithms.layer_wise", false]], "neural_compressor.torch.algorithms.layer_wise.load": [[396, "module-neural_compressor.torch.algorithms.layer_wise.load", false]], "neural_compressor.torch.algorithms.layer_wise.modified_pickle": [[397, "module-neural_compressor.torch.algorithms.layer_wise.modified_pickle", false]], "neural_compressor.torch.algorithms.layer_wise.utils": [[398, "module-neural_compressor.torch.algorithms.layer_wise.utils", false]], "neural_compressor.torch.algorithms.mixed_precision": [[400, "module-neural_compressor.torch.algorithms.mixed_precision", false]], "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert": [[399, "module-neural_compressor.torch.algorithms.mixed_precision.half_precision_convert", false]], "neural_compressor.torch.algorithms.mixed_precision.module_wrappers": [[401, "module-neural_compressor.torch.algorithms.mixed_precision.module_wrappers", false]], "neural_compressor.torch.algorithms.mx_quant": [[402, "module-neural_compressor.torch.algorithms.mx_quant", false]], "neural_compressor.torch.algorithms.mx_quant.mx": [[403, "module-neural_compressor.torch.algorithms.mx_quant.mx", false]], "neural_compressor.torch.algorithms.mx_quant.utils": [[404, "module-neural_compressor.torch.algorithms.mx_quant.utils", false]], "neural_compressor.torch.algorithms.pt2e_quant": [[407, "module-neural_compressor.torch.algorithms.pt2e_quant", false]], "neural_compressor.torch.algorithms.pt2e_quant.core": [[405, "module-neural_compressor.torch.algorithms.pt2e_quant.core", false]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter": [[406, "module-neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter", false]], "neural_compressor.torch.algorithms.pt2e_quant.save_load": [[408, "module-neural_compressor.torch.algorithms.pt2e_quant.save_load", false]], "neural_compressor.torch.algorithms.pt2e_quant.utility": [[409, "module-neural_compressor.torch.algorithms.pt2e_quant.utility", false]], "neural_compressor.torch.algorithms.smooth_quant": [[410, "module-neural_compressor.torch.algorithms.smooth_quant", false]], "neural_compressor.torch.algorithms.smooth_quant.save_load": [[411, "module-neural_compressor.torch.algorithms.smooth_quant.save_load", false]], "neural_compressor.torch.algorithms.smooth_quant.smooth_quant": [[412, "module-neural_compressor.torch.algorithms.smooth_quant.smooth_quant", false]], "neural_compressor.torch.algorithms.smooth_quant.utility": [[413, "module-neural_compressor.torch.algorithms.smooth_quant.utility", false]], "neural_compressor.torch.algorithms.static_quant": [[414, "module-neural_compressor.torch.algorithms.static_quant", false]], "neural_compressor.torch.algorithms.static_quant.save_load": [[415, "module-neural_compressor.torch.algorithms.static_quant.save_load", false]], "neural_compressor.torch.algorithms.static_quant.static_quant": [[416, "module-neural_compressor.torch.algorithms.static_quant.static_quant", false]], "neural_compressor.torch.algorithms.static_quant.utility": [[417, "module-neural_compressor.torch.algorithms.static_quant.utility", false]], "neural_compressor.torch.algorithms.weight_only": [[428, "module-neural_compressor.torch.algorithms.weight_only", false]], "neural_compressor.torch.algorithms.weight_only.autoround": [[418, "module-neural_compressor.torch.algorithms.weight_only.autoround", false]], 
"neural_compressor.torch.algorithms.weight_only.awq": [[419, "module-neural_compressor.torch.algorithms.weight_only.awq", false]], "neural_compressor.torch.algorithms.weight_only.gptq": [[420, "module-neural_compressor.torch.algorithms.weight_only.gptq", false]], "neural_compressor.torch.algorithms.weight_only.hqq": [[424, "module-neural_compressor.torch.algorithms.weight_only.hqq", false]], "neural_compressor.torch.algorithms.weight_only.hqq.bitpack": [[421, "module-neural_compressor.torch.algorithms.weight_only.hqq.bitpack", false]], "neural_compressor.torch.algorithms.weight_only.hqq.config": [[422, "module-neural_compressor.torch.algorithms.weight_only.hqq.config", false]], "neural_compressor.torch.algorithms.weight_only.hqq.core": [[423, "module-neural_compressor.torch.algorithms.weight_only.hqq.core", false]], "neural_compressor.torch.algorithms.weight_only.hqq.optimizer": [[425, "module-neural_compressor.torch.algorithms.weight_only.hqq.optimizer", false]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor": [[426, "module-neural_compressor.torch.algorithms.weight_only.hqq.qtensor", false]], "neural_compressor.torch.algorithms.weight_only.hqq.quantizer": [[427, "module-neural_compressor.torch.algorithms.weight_only.hqq.quantizer", false]], "neural_compressor.torch.algorithms.weight_only.modules": [[429, "module-neural_compressor.torch.algorithms.weight_only.modules", false]], "neural_compressor.torch.algorithms.weight_only.rtn": [[430, "module-neural_compressor.torch.algorithms.weight_only.rtn", false]], "neural_compressor.torch.algorithms.weight_only.save_load": [[431, "module-neural_compressor.torch.algorithms.weight_only.save_load", false]], "neural_compressor.torch.algorithms.weight_only.teq": [[432, "module-neural_compressor.torch.algorithms.weight_only.teq", false]], "neural_compressor.torch.algorithms.weight_only.utility": [[433, "module-neural_compressor.torch.algorithms.weight_only.utility", false]], "neural_compressor.torch.export": [[434, "module-neural_compressor.torch.export", false]], "neural_compressor.torch.export.pt2e_export": [[435, "module-neural_compressor.torch.export.pt2e_export", false]], "neural_compressor.torch.quantization": [[440, "module-neural_compressor.torch.quantization", false]], "neural_compressor.torch.quantization.algorithm_entry": [[437, "module-neural_compressor.torch.quantization.algorithm_entry", false]], "neural_compressor.torch.quantization.autotune": [[438, "module-neural_compressor.torch.quantization.autotune", false]], "neural_compressor.torch.quantization.config": [[439, "module-neural_compressor.torch.quantization.config", false]], "neural_compressor.torch.quantization.load_entry": [[441, "module-neural_compressor.torch.quantization.load_entry", false]], "neural_compressor.torch.quantization.quantize": [[442, "module-neural_compressor.torch.quantization.quantize", false]], "neural_compressor.torch.utils": [[447, "module-neural_compressor.torch.utils", false]], "neural_compressor.torch.utils.auto_accelerator": [[443, "module-neural_compressor.torch.utils.auto_accelerator", false]], "neural_compressor.torch.utils.bit_packer": [[444, "module-neural_compressor.torch.utils.bit_packer", false]], "neural_compressor.torch.utils.constants": [[445, "module-neural_compressor.torch.utils.constants", false]], "neural_compressor.torch.utils.environ": [[446, "module-neural_compressor.torch.utils.environ", false]], "neural_compressor.torch.utils.utility": [[448, "module-neural_compressor.torch.utils.utility", false]], 
"neural_compressor.training": [[449, "module-neural_compressor.training", false]], "neural_compressor.transformers.quantization.utils": [[450, "module-neural_compressor.transformers.quantization.utils", false]], "neural_compressor.transformers.utils": [[451, "module-neural_compressor.transformers.utils", false]], "neural_compressor.transformers.utils.quantization_config": [[452, "module-neural_compressor.transformers.utils.quantization_config", false]], "neural_compressor.utils": [[460, "module-neural_compressor.utils", false]], "neural_compressor.utils.collect_layer_histogram": [[453, "module-neural_compressor.utils.collect_layer_histogram", false]], "neural_compressor.utils.constant": [[454, "module-neural_compressor.utils.constant", false]], "neural_compressor.utils.create_obj_from_config": [[455, "module-neural_compressor.utils.create_obj_from_config", false]], "neural_compressor.utils.export": [[456, "module-neural_compressor.utils.export", false]], "neural_compressor.utils.export.qlinear2qdq": [[457, "module-neural_compressor.utils.export.qlinear2qdq", false]], "neural_compressor.utils.export.tf2onnx": [[458, "module-neural_compressor.utils.export.tf2onnx", false]], "neural_compressor.utils.export.torch2onnx": [[459, "module-neural_compressor.utils.export.torch2onnx", false]], "neural_compressor.utils.kl_divergence": [[461, "module-neural_compressor.utils.kl_divergence", false]], "neural_compressor.utils.load_huggingface": [[462, "module-neural_compressor.utils.load_huggingface", false]], "neural_compressor.utils.logger": [[463, "module-neural_compressor.utils.logger", false]], "neural_compressor.utils.options": [[464, "module-neural_compressor.utils.options", false]], "neural_compressor.utils.pytorch": [[465, "module-neural_compressor.utils.pytorch", false]], "neural_compressor.utils.utility": [[466, "module-neural_compressor.utils.utility", false]], "neural_compressor.utils.weights_details": [[467, "module-neural_compressor.utils.weights_details", false]], "neural_compressor.version": [[468, "module-neural_compressor.version", false]], "node_collector (class in neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.Node_collector", false]], "node_from_map() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.node_from_map", false]], "node_from_map() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.node_from_map", false]], "node_from_map() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.node_from_map", false]], "node_from_map() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.node_from_map", false]], "node_from_map() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.node_from_map", false]], "node_from_map() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.node_from_map", false]], "node_name_from_input() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.node_name_from_input", false]], "nondigit_punct_re (neural_compressor.metric.bleu.unicoderegex attribute)": [[227, "neural_compressor.metric.bleu.UnicodeRegex.nondigit_punct_re", false]], "normalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.norm)": [[20, "neural_compressor.adaptor.ox_utils.operators.norm.NormalizationOperator", false]], "normalize_answer() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.normalize_answer", false]], "normalizetftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.NormalizeTFTransform", false]], "normalizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.NormalizeTransform", false]], "num_correct (neural_compressor.metric.metric.generaltopk attribute)": [[234, "neural_compressor.metric.metric.GeneralTopK.num_correct", false]], "num_correct (neural_compressor.metric.metric.tensorflowtopk attribute)": [[234, "neural_compressor.metric.metric.TensorflowTopK.num_correct", false]], "num_sample (neural_compressor.metric.metric.generaltopk attribute)": [[234, "neural_compressor.metric.metric.GeneralTopK.num_sample", false]], "num_sample (neural_compressor.metric.metric.tensorflowtopk attribute)": [[234, "neural_compressor.metric.metric.TensorflowTopK.num_sample", false]], "objective (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Objective", false]], "objective_custom_registry() (in module neural_compressor.objective)": [[245, "neural_compressor.objective.objective_custom_registry", false]], "objective_registry() (in module neural_compressor.objective)": [[245, "neural_compressor.objective.objective_registry", false]], "oneshotscheduler (class in neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.OneshotScheduler", false]], "onnx (class in neural_compressor.config)": [[195, 
"neural_compressor.config.ONNX", false]], "onnx_qlinear_to_qdq() (in module neural_compressor.utils.export.qlinear2qdq)": [[457, "neural_compressor.utils.export.qlinear2qdq.onnx_qlinear_to_qdq", false]], "onnxbilinearimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.OnnxBilinearImagenetTransform", false]], "onnxgraph (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph)": [[87, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph.OnnxGraph", false]], "onnxmodel (class in neural_compressor.model.onnx_model)": [[242, "neural_compressor.model.onnx_model.ONNXModel", false]], "onnxnode (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node)": [[88, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node.OnnxNode", false]], "onnxopschema (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema)": [[89, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema.OnnxOpSchema", false]], "onnxprofilingparser (class in neural_compressor.profiling.parser.onnx_parser.parser)": [[249, "neural_compressor.profiling.parser.onnx_parser.parser.OnnxProfilingParser", false]], "onnxqlinear2qdqconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.ONNXQlinear2QDQConfig", false]], "onnxresizecropimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ONNXResizeCropImagenetTransform", false]], "onnxrt (class in neural_compressor.utils.options)": [[464, "neural_compressor.utils.options.onnxrt", false]], "onnxrtaugment (class in neural_compressor.adaptor.ox_utils.calibration)": [[2, "neural_compressor.adaptor.ox_utils.calibration.ONNXRTAugment", false]], "onnxrtbertdataloader (class in neural_compressor.data.dataloaders.onnxrt_dataloader)": [[205, "neural_compressor.data.dataloaders.onnxrt_dataloader.ONNXRTBertDataLoader", false]], "onnxrtbertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.ONNXRTBertDataset", false]], "onnxrtcroptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ONNXRTCropToBoundingBox", false]], "onnxrtdataloader (class in neural_compressor.data.dataloaders.onnxrt_dataloader)": [[205, "neural_compressor.data.dataloaders.onnxrt_dataloader.ONNXRTDataLoader", false]], "onnxrtglue (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ONNXRTGLUE", false]], "onnxrtimagenetdataset (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.ONNXRTImagenetDataset", false]], "onnxrtitdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.ONNXRTITDatasets", false]], "onnxrtitfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.ONNXRTITFilters", false]], "onnxrtitmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ONNXRTITMetrics", false]], "onnxrtittransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ONNXRTITTransforms", false]], "onnxrtparserfactory (class in neural_compressor.profiling.parser.onnx_parser.factory)": [[248, 
"neural_compressor.profiling.parser.onnx_parser.factory.OnnxrtParserFactory", false]], "onnxrtqldatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.ONNXRTQLDatasets", false]], "onnxrtqlfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.ONNXRTQLFilters", false]], "onnxrtqlmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ONNXRTQLMetrics", false]], "onnxrtqltransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ONNXRTQLTransforms", false]], "op_level (neural_compressor.common.tuning_param.paramlevel attribute)": [[156, "neural_compressor.common.tuning_param.ParamLevel.OP_LEVEL", false]], "op_registry() (in module neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.op_registry", false]], "op_type_level (neural_compressor.common.tuning_param.paramlevel attribute)": [[156, "neural_compressor.common.tuning_param.ParamLevel.OP_TYPE_LEVEL", false]], "opentry (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.OpEntry", false]], "operator (class in neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.Operator", false]], "operatorconfig (class in neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.OperatorConfig", false]], "operatorconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.OperatorConfig", false]], "optimize_weights_proximal_legacy() (in module neural_compressor.torch.algorithms.weight_only.hqq.optimizer)": [[425, "neural_compressor.torch.algorithms.weight_only.hqq.optimizer.optimize_weights_proximal_legacy", false]], "optimizedmodel (class in neural_compressor.utils.load_huggingface)": [[462, "neural_compressor.utils.load_huggingface.OptimizedModel", false]], "optimizeqdqgraph (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq)": [[116, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq.OptimizeQDQGraph", false]], "optimizeqdqgraph (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq)": [[371, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq.OptimizeQDQGraph", false]], "optimizer_registry() (in module neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.optimizer_registry", false]], "optimizers (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.Optimizers", false]], "options (class in neural_compressor.config)": [[195, "neural_compressor.config.Options", false]], "optuningconfig (class in neural_compressor.strategy.utils.tuning_structs)": [[279, "neural_compressor.strategy.utils.tuning_structs.OpTuningConfig", false]], "optype (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.OpType", false]], "optypewisetuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.OpTypeWiseTuningSampler", false]], "opwisetuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, 
"neural_compressor.strategy.utils.tuning_sampler.OpWiseTuningSampler", false]], "ordereddefaultdict (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.OrderedDefaultDict", false]], "ortsmoothquant (class in neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, "neural_compressor.adaptor.ox_utils.smooth_quant.ORTSmoothQuant", false]], "pack_array_with_numba_b2_c16() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c16", false]], "pack_array_with_numba_b2_c32() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c32", false]], "pack_array_with_numba_b2_c64() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c64", false]], "pack_array_with_numba_b2_c8() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c8", false]], "pack_array_with_numba_b4_c16() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c16", false]], "pack_array_with_numba_b4_c32() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c32", false]], "pack_array_with_numba_b4_c64() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c64", false]], "pack_array_with_numba_b4_c8() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c8", false]], "pack_array_with_numba_b8_c16() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c16", false]], "pack_array_with_numba_b8_c32() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c32", false]], "pack_array_with_numba_b8_c64() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c64", false]], "pack_array_with_numba_b8_c8() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c8", false]], "packer (class in neural_compressor.torch.algorithms.weight_only.hqq.bitpack)": [[421, "neural_compressor.torch.algorithms.weight_only.hqq.bitpack.Packer", false]], "packing (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.packing", false]], "pad_tensor() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.pad_tensor", false]], "paddedcentercroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PaddedCenterCropTransform", false]], "padoperator (class in neural_compressor.adaptor.ox_utils.operators.pad)": [[22, "neural_compressor.adaptor.ox_utils.operators.pad.PadOperator", false]], "paramlevel (class in neural_compressor.common.tuning_param)": [[156, "neural_compressor.common.tuning_param.ParamLevel", false]], "params_list (neural_compressor.common.base_config.baseconfig attribute)": [[152, 
"neural_compressor.common.base_config.BaseConfig.params_list", false]], "parse_auto_slim_config() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.parse_auto_slim_config", false]], "parse_cfgs() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.parse_cfgs", false]], "parse_last_linear() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_last_linear", false]], "parse_last_linear_tf() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_last_linear_tf", false]], "parse_saved_model() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.parse_saved_model", false]], "parse_saved_model() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.parse_saved_model", false]], "parse_str2list() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.parse_str2list", false]], "parse_to_prune() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_to_prune", false]], "parse_to_prune_tf() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_to_prune_tf", false]], "parse_tune_config() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.parse_tune_config", false]], "parse_valid_pruner_types() (in module neural_compressor.compression.pruner.pruners)": [[183, "neural_compressor.compression.pruner.pruners.parse_valid_pruner_types", false]], "parsedecodebert (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.ParseDecodeBert", false]], "parsedecodecoco (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.ParseDecodeCoco", false]], "parsedecodeimagenet (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ParseDecodeImagenet", false]], "parsedecodeimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ParseDecodeImagenetTransform", false]], "parsedecodevoctransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ParseDecodeVocTransform", false]], "parserfactory (class in neural_compressor.profiling.parser.factory)": [[247, "neural_compressor.profiling.parser.factory.ParserFactory", false]], "paser_cfgs() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.paser_cfgs", false]], "patch_hqq_moduile() (in module neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.patch_hqq_moduile", false]], "pattern (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.pattern", false]], "pattern (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, 
"neural_compressor.compression.pruner.patterns.base.KerasBasePattern.pattern", false]], "pattern (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.pattern", false]], "pattern_factory() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.pattern_factory", false]], "pattern_to_internal() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.pattern_to_internal", false]], "pattern_to_path() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.pattern_to_path", false]], "patternmha (class in neural_compressor.compression.pruner.patterns.mha)": [[177, "neural_compressor.compression.pruner.patterns.mha.PatternMHA", false]], "patternpair (class in neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair", false]], "percentilecalibrator (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.PercentileCalibrator", false]], "performance (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Performance", false]], "pickleerror": [[138, "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle.PickleError", false], [397, "neural_compressor.torch.algorithms.layer_wise.modified_pickle.PickleError", false]], "picklingerror": [[138, "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle.PicklingError", false], [397, "neural_compressor.torch.algorithms.layer_wise.modified_pickle.PicklingError", false]], "pooloperator (class in neural_compressor.adaptor.ox_utils.operators.pooling)": [[23, "neural_compressor.adaptor.ox_utils.operators.pooling.PoolOperator", false]], "postcompressionutils (class in neural_compressor.compression.pruner.model_slim.weight_slim)": [[174, 
"neural_compressor.compression.pruner.model_slim.weight_slim.PostCompressionUtils", false]], "postcseoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse)": [[83, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse.PostCseOptimizer", false]], "postcseoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse)": [[354, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse.PostCseOptimizer", false]], "posthostconstconverter (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter)": [[82, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter.PostHostConstConverter", false]], "posthostconstconverter (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter)": [[353, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter.PostHostConstConverter", false]], "postprocess (class in neural_compressor.data.transforms.postprocess)": [[223, "neural_compressor.data.transforms.postprocess.Postprocess", false]], "postprocess_model() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.postprocess_model", false]], "posttrainingquantconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.PostTrainingQuantConfig", false]], "pred_list (neural_compressor.metric.metric.accuracy attribute)": [[234, "neural_compressor.metric.metric.Accuracy.pred_list", false]], "pred_list (neural_compressor.metric.metric.mae attribute)": [[234, "neural_compressor.metric.metric.MAE.pred_list", false]], "pred_list (neural_compressor.metric.metric.mse attribute)": [[234, "neural_compressor.metric.metric.MSE.pred_list", false]], "predictions (neural_compressor.metric.bleu.bleu attribute)": [[227, "neural_compressor.metric.bleu.BLEU.predictions", false]], "preoptimization (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize)": [[64, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize.PreOptimization", false]], "preoptimization (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize)": [[336, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize.PreOptimization", false]], "prepare() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.prepare", false]], "prepare_compression() (in module neural_compressor.training)": [[449, "neural_compressor.training.prepare_compression", false]], "prepare_dataloader() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.prepare_dataloader", false]], "prepare_inputs() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.prepare_inputs", false]], "prepare_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.prepare_model", false]], "prepare_model_data() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.prepare_model_data", false]], "prepare_pruning() (in module neural_compressor.compression.pruner)": [[170, "neural_compressor.compression.pruner.prepare_pruning", false]], "preprocess_user_cfg() (in module 
neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.preprocess_user_cfg", false]], "print_iterables() (in module neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.print_iterables", false]], "print_op_list() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.print_op_list", false]], "print_table() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.print_table", false]], "process_and_check_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_and_check_config", false]], "process_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_config", false]], "process_weight_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_weight_config", false]], "process_yaml_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_yaml_config", false]], "processortype (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.ProcessorType", false]], "profile() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.profile", false]], "profiler (class in neural_compressor.profiling.profiler.onnxrt_profiler.profiler)": [[256, "neural_compressor.profiling.profiler.onnxrt_profiler.profiler.Profiler", false]], "profiler (class in neural_compressor.profiling.profiler.profiler)": [[258, "neural_compressor.profiling.profiler.profiler.Profiler", false]], "profiler (class in neural_compressor.profiling.profiler.tensorflow_profiler.profiler)": [[260, "neural_compressor.profiling.profiler.tensorflow_profiler.profiler.Profiler", false]], "profilerfactory (class in neural_compressor.profiling.profiler.factory)": [[254, "neural_compressor.profiling.profiler.factory.ProfilerFactory", false]], "profilerfactory (class in neural_compressor.profiling.profiler.onnxrt_profiler.factory)": [[255, "neural_compressor.profiling.profiler.onnxrt_profiler.factory.ProfilerFactory", false]], "profilerfactory (class in neural_compressor.profiling.profiler.tensorflow_profiler.factory)": [[259, "neural_compressor.profiling.profiler.tensorflow_profiler.factory.ProfilerFactory", false]], "profilingparser (class in neural_compressor.profiling.parser.parser)": [[250, "neural_compressor.profiling.parser.parser.ProfilingParser", false]], "profilingresult (class in neural_compressor.profiling.parser.result)": [[251, "neural_compressor.profiling.parser.result.ProfilingResult", false]], "pruner_info (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.pruner_info", false]], "pruner_info (neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.pruner_info", false]], "pruner_info (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.pruner_info", false]], "pruners (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.pruners", false]], "pruners 
(neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.pruners", false]], "pruners (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.pruners", false]], "pruning_frequency (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.pruning_frequency", false]], "pruning_frequency (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.pruning_frequency", false]], "pruning_frequency (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.pruning_frequency", false]], "pruningcallbacks (class in neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.PruningCallbacks", false]], "pruningcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.PruningCriterion", false]], "pruningcriterion (class in neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.PruningCriterion", false]], "pruningscheduler (class in neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.PruningScheduler", false]], "pt2e_dynamic_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.pt2e_dynamic_quant_entry", false]], "pt2e_static_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.pt2e_static_quant_entry", false]], "punct_nondigit_re (neural_compressor.metric.bleu.unicoderegex attribute)": [[227, "neural_compressor.metric.bleu.UnicodeRegex.punct_nondigit_re", false]], "pythonmultiheadattentionpruner (class in neural_compressor.compression.pruner.pruners.mha)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner", false]], "pytorch (class in neural_compressor.config)": [[195, "neural_compressor.config.PyTorch", false]], "pytorchalignimagechannel (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchAlignImageChannel", false]], "pytorchbasemodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.PyTorchBaseModel", false]], "pytorchbasepattern (class in neural_compressor.compression.pruner.patterns.base)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern", false]], "pytorchbasepruner (class in neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner", false]], "pytorchbasicpruner (class in neural_compressor.compression.pruner.pruners.basic)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner", false]], "pytorchbertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.PytorchBertDataset", false]], "pytorchblockmaskpruner (class in neural_compressor.compression.pruner.pruners.block_mask)": [[182, 
"neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner", false]], "pytorchcifar10 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchCIFAR10", false]], "pytorchcifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchCIFAR100", false]], "pytorchcriterions (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchCriterions", false]], "pytorchcropresizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchCropResizeTransform", false]], "pytorchcrossentropyloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchCrossEntropyLoss", false]], "pytorchdataloader (class in neural_compressor.data.dataloaders.pytorch_dataloader)": [[206, "neural_compressor.data.dataloaders.pytorch_dataloader.PyTorchDataLoader", false]], "pytorchdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PyTorchDatasets", false]], "pytorchfashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchFashionMNIST", false]], "pytorchfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.PyTorchFilters", false]], "pytorchfxmodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.PyTorchFXModel", false]], "pytorchimagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.PytorchImagenetRaw", false]], "pytorchintermediatelayersknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchIntermediateLayersKnowledgeDistillationLoss", false]], "pytorchintermediatelayersknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchIntermediateLayersKnowledgeDistillationLossWrapper", false]], "pytorchknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchKnowledgeDistillationLoss", false]], "pytorchknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchKnowledgeDistillationLossWrapper", false]], "pytorchloss (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.PyTorchLoss", false]], "pytorchmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.PyTorchMetrics", false]], "pytorchmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchMNIST", false]], "pytorchmodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.PyTorchModel", false]], "pytorchmxnettransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PytorchMxnetTransform", false]], "pytorchmxnetwrapdataset (class in neural_compressor.data.datasets.dataset)": [[211, 
"neural_compressor.data.datasets.dataset.PytorchMxnetWrapDataset", false]], "pytorchmxnetwrapfunction (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchMxnetWrapFunction", false]], "pytorchmxnetwrapfunction (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PytorchMxnetWrapFunction", false]], "pytorchnormalizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchNormalizeTransform", false]], "pytorchoptimizers (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.PyTorchOptimizers", false]], "pytorchpatternlockpruner (class in neural_compressor.compression.pruner.pruners.pattern_lock)": [[185, "neural_compressor.compression.pruner.pruners.pattern_lock.PytorchPatternLockPruner", false]], "pytorchpatternninm (class in neural_compressor.compression.pruner.patterns.ninm)": [[178, "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM", false]], "pytorchpatternnxm (class in neural_compressor.compression.pruner.patterns.nxm)": [[179, "neural_compressor.compression.pruner.patterns.nxm.PytorchPatternNxM", false]], "pytorchprogressivepruner (class in neural_compressor.compression.pruner.pruners.progressive)": [[186, "neural_compressor.compression.pruner.pruners.progressive.PytorchProgressivePruner", false]], "pytorchretrainfreepruner (class in neural_compressor.compression.pruner.pruners.retrain_free)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner", false]], "pytorchselfknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchSelfKnowledgeDistillationLoss", false]], "pytorchselfknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchSelfKnowledgeDistillationLossWrapper", false]], "pytorchsgd (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.PyTorchSGD", false]], "pytorchtransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchTransforms", false]], "pytorchtranspose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchTranspose", false]], "qactivationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.QActivationOperator", false]], "qargmaxoperator (class in neural_compressor.adaptor.ox_utils.operators.argmax)": [[6, "neural_compressor.adaptor.ox_utils.operators.argmax.QArgMaxOperator", false]], "qat_clone_function() (in module neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper)": [[101, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper.qat_clone_function", false]], "qattentionoperator (class in neural_compressor.adaptor.ox_utils.operators.attention)": [[7, "neural_compressor.adaptor.ox_utils.operators.attention.QAttentionOperator", false]], "qavgpool2d (class in neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.QAvgPool2D", false]], "qbinaryoperator (class in 
neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, "neural_compressor.adaptor.ox_utils.operators.binary_op.QBinaryOperator", false]], "qconcatoperator (class in neural_compressor.adaptor.ox_utils.operators.concat)": [[9, "neural_compressor.adaptor.ox_utils.operators.concat.QConcatOperator", false]], "qconv2d (class in neural_compressor.tensorflow.keras.layers.conv2d)": [[292, "neural_compressor.tensorflow.keras.layers.conv2d.QConv2D", false]], "qconvoperator (class in neural_compressor.adaptor.ox_utils.operators.conv)": [[10, "neural_compressor.adaptor.ox_utils.operators.conv.QConvOperator", false]], "qdense (class in neural_compressor.tensorflow.keras.layers.dense)": [[293, "neural_compressor.tensorflow.keras.layers.dense.QDense", false]], "qdepthwiseconv2d (class in neural_compressor.tensorflow.keras.layers.depthwise_conv2d)": [[294, "neural_compressor.tensorflow.keras.layers.depthwise_conv2d.QDepthwiseConv2D", false]], "qdirectoperator (class in neural_compressor.adaptor.ox_utils.operators.direct_q8)": [[11, "neural_compressor.adaptor.ox_utils.operators.direct_q8.QDirectOperator", false]], "qdq_quantize() (in module neural_compressor.torch.algorithms.smooth_quant.smooth_quant)": [[412, "neural_compressor.torch.algorithms.smooth_quant.smooth_quant.qdq_quantize", false]], "qdq_tensor() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.qdq_tensor", false]], "qdq_weight_actor() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.qdq_weight_actor", false]], "qdq_weight_asym() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.qdq_weight_asym", false]], "qdq_weight_sym() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.qdq_weight_sym", false]], "qdqlayer (class in neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.QDQLayer", false]], "qdqlayer (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.QDQLayer", false]], "qembedlayernormalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.embed_layernorm)": [[12, "neural_compressor.adaptor.ox_utils.operators.embed_layernorm.QEmbedLayerNormalizationOperator", false]], "qgatheroperator (class in neural_compressor.adaptor.ox_utils.operators.gather)": [[13, "neural_compressor.adaptor.ox_utils.operators.gather.QGatherOperator", false]], "qgemmoperator (class in neural_compressor.adaptor.ox_utils.operators.gemm)": [[15, "neural_compressor.adaptor.ox_utils.operators.gemm.QGemmOperator", false]], "qglobalaveragepooloperator (class in neural_compressor.adaptor.ox_utils.operators.gavgpool)": [[14, "neural_compressor.adaptor.ox_utils.operators.gavgpool.QGlobalAveragePoolOperator", false]], "qmatmuloperator (class in neural_compressor.adaptor.ox_utils.operators.matmul)": [[18, "neural_compressor.adaptor.ox_utils.operators.matmul.QMatMulOperator", false]], "qmaxpool2d (class in neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.QMaxPool2D", false]], "qmaxpooloperator (class in neural_compressor.adaptor.ox_utils.operators.maxpool)": [[19, "neural_compressor.adaptor.ox_utils.operators.maxpool.QMaxPoolOperator", false]], 
"qop_registry() (in module neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.qop_registry", false]], "qoperator (class in neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.QOperator", false]], "qpadoperator (class in neural_compressor.adaptor.ox_utils.operators.pad)": [[22, "neural_compressor.adaptor.ox_utils.operators.pad.QPadOperator", false]], "qpooloperator (class in neural_compressor.adaptor.ox_utils.operators.pooling)": [[23, "neural_compressor.adaptor.ox_utils.operators.pooling.QPoolOperator", false]], "qresizeoperator (class in neural_compressor.adaptor.ox_utils.operators.resize)": [[25, "neural_compressor.adaptor.ox_utils.operators.resize.QResizeOperator", false]], "qseparableconv2d (class in neural_compressor.tensorflow.keras.layers.separable_conv2d)": [[298, "neural_compressor.tensorflow.keras.layers.separable_conv2d.QSeparableConv2D", false]], "qsplitoperator (class in neural_compressor.adaptor.ox_utils.operators.split)": [[26, "neural_compressor.adaptor.ox_utils.operators.split.QSplitOperator", false]], "qtensor (class in neural_compressor.torch.algorithms.weight_only.hqq.qtensor)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensor", false]], "qtensorconfig (class in neural_compressor.torch.algorithms.weight_only.hqq.config)": [[422, "neural_compressor.torch.algorithms.weight_only.hqq.config.QTensorConfig", false]], "qtensormetainfo (class in neural_compressor.torch.algorithms.weight_only.hqq.qtensor)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo", false]], "quant_dequant_data() (in module neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, "neural_compressor.adaptor.ox_utils.smooth_quant.quant_dequant_data", false]], "quant_dequant_w_v1() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.quant_dequant_w_v1", false]], "quant_dequant_x_v1() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.quant_dequant_x_v1", false]], "quant_mode_from_pattern() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.quant_mode_from_pattern", false]], "quant_tensor() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.quant_tensor", false]], "quant_tensor() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.quant_tensor", false]], "quant_weight_w_scale() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.quant_weight_w_scale", false]], "quantformat (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantFormat", false]], "quantizationawaretrainingcallbacks (class in neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.QuantizationAwareTrainingCallbacks", false]], "quantizationawaretrainingconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.QuantizationAwareTrainingConfig", false]], "quantizationmethod (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.QuantizationMethod", 
false]], "quantizationmode (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizationMode", false]], "quantize() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.quantize", false]], "quantize_4bit() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.quantize_4bit", false]], "quantize_data() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.quantize_data", false]], "quantize_data_per_channel() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.quantize_data_per_channel", false]], "quantize_data_with_scale_zero() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.quantize_data_with_scale_zero", false]], "quantize_elemwise_op() (in module neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.quantize_elemwise_op", false]], "quantize_model() (in module neural_compressor.tensorflow.quantization.quantize)": [[305, "neural_compressor.tensorflow.quantization.quantize.quantize_model", false]], "quantize_model_with_single_config() (in module neural_compressor.tensorflow.quantization.quantize)": [[305, "neural_compressor.tensorflow.quantization.quantize.quantize_model_with_single_config", false]], "quantize_mx_op() (in module neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.quantize_mx_op", false]], "quantize_nparray() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.quantize_nparray", false]], "quantize_sym_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.quantize_sym_model", false]], "quantizeconfig (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config)": [[100, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config.QuantizeConfig", false]], "quantizedinitializer (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizedInitializer", false]], "quantizedinput (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.QuantizedInput", false]], "quantizedrnnconverter (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert)": [[84, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert.QuantizedRNNConverter", false]], "quantizedvalue (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizedValue", false]], "quantizedvaluetype (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizedValueType", false]], "quantizegraphbase (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base)": [[117, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base.QuantizeGraphBase", false]], "quantizegraphbase (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base)": [[372, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base.QuantizeGraphBase", false]], "quantizegraphforintel (class in 
neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu)": [[121, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu.QuantizeGraphForIntel", false]], "quantizegraphforintel (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu)": [[376, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu.QuantizeGraphForIntel", false]], "quantizegraphhelper (class in neural_compressor.adaptor.tf_utils.quantize_graph_common)": [[124, "neural_compressor.adaptor.tf_utils.quantize_graph_common.QuantizeGraphHelper", false]], "quantizegraphhelper (class in neural_compressor.tensorflow.quantization.utils.quantize_graph_common)": [[379, "neural_compressor.tensorflow.quantization.utils.quantize_graph_common.QuantizeGraphHelper", false]], "quantizelayeradd (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add)": [[104, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add.QuantizeLayerAdd", false]], "quantizelayerbase (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base)": [[105, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base.QuantizeLayerBase", false]], "quantizelayerbatchnormalization (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn)": [[106, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn.QuantizeLayerBatchNormalization", false]], "quantizenodebase (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base)": [[117, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base.QuantizeNodeBase", false]], "quantizenodebase (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base)": [[372, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base.QuantizeNodeBase", false]], "quantizer (class in neural_compressor.adaptor.ox_utils.quantizer)": [[28, "neural_compressor.adaptor.ox_utils.quantizer.Quantizer", false]], "quantizer (class in neural_compressor.torch.algorithms.base_algorithm)": [[392, "neural_compressor.torch.algorithms.base_algorithm.Quantizer", false]], "quantizer (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.Quantizer", false]], "quantizewrapper (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper)": [[107, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper.QuantizeWrapper", false]], "quantizewrapperbase (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper)": [[107, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper.QuantizeWrapperBase", false]], "quantoptions (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.QuantOptions", false]], "quanttype (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantType", false]], "quanttype (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.QuantType", false]], "query_quantizable_nodes() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.query_quantizable_nodes", false]], "randomcroptftransform (class in 
neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomCropTFTransform", false]], "randomcroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomCropTransform", false]], "randomhorizontalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomHorizontalFlip", false]], "randomresizedcropmxnettransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropMXNetTransform", false]], "randomresizedcroppytorchtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropPytorchTransform", false]], "randomresizedcroptftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropTFTransform", false]], "randomresizedcroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropTransform", false]], "randomtunestrategy (class in neural_compressor.strategy.random)": [[273, "neural_compressor.strategy.random.RandomTuneStrategy", false]], "randomverticalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomVerticalFlip", false]], "rawgptquantizer (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.RAWGPTQuantizer", false]], "read_graph() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.read_graph", false]], "read_graph() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.read_graph", false]], "read_squad_examples() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.read_squad_examples", false]], "read_tensorflow_node_attrs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.read_tensorflow_node_attrs", false]], "recipe (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.recipe", false]], "recipesearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher", false]], "reconstruct_saved_model() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.reconstruct_saved_model", false]], "reconstruct_saved_model() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.reconstruct_saved_model", false]], "record_output() (in module neural_compressor.compression.distillation.utility)": [[166, "neural_compressor.compression.distillation.utility.record_output", false]], "recover() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.recover", false]], "recover_forward() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, 
"neural_compressor.torch.algorithms.weight_only.utility.recover_forward", false]], "recover_model_from_json() (in module neural_compressor.torch.algorithms.smooth_quant.save_load)": [[411, "neural_compressor.torch.algorithms.smooth_quant.save_load.recover_model_from_json", false]], "recover_model_from_json() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.recover_model_from_json", false]], "reduceminmaxoperator (class in neural_compressor.adaptor.ox_utils.operators.reduce)": [[24, "neural_compressor.adaptor.ox_utils.operators.reduce.ReduceMinMaxOperator", false]], "reduceoperator (class in neural_compressor.adaptor.ox_utils.operators.reduce)": [[24, "neural_compressor.adaptor.ox_utils.operators.reduce.ReduceOperator", false]], "reg (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.reg", false]], "reg (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.reg", false]], "reg (neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.reg", false]], "reg (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.reg", false]], "reg_terms (neural_compressor.compression.pruner.regs.grouplasso attribute)": [[189, "neural_compressor.compression.pruner.regs.GroupLasso.reg_terms", false]], "register_accelerator() (in module neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.register_accelerator", false]], "register_algo() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.register_algo", false]], "register_algo() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.register_algo", false]], "register_autotune() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.register_autotune", false]], "register_config() (in module neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.register_config", false]], "register_criterion() (in module neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.register_criterion", false]], "register_criterion() (in module neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.register_criterion", false]], "register_customer_metric() (in module neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.register_customer_metric", false]], "register_pack_func() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.register_pack_func", false]], "register_pattern() (in module neural_compressor.compression.pruner.patterns.base)": [[175, "neural_compressor.compression.pruner.patterns.base.register_pattern", false]], "register_pruner() (in module neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.register_pruner", false]], "register_pruning() (in module neural_compressor.compression.pruner.pruning)": 
[[188, "neural_compressor.compression.pruner.pruning.register_pruning", false]], "register_reg() (in module neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.register_reg", false]], "register_scheduler() (in module neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.register_scheduler", false]], "register_supported_configs_for_fwk() (in module neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.register_supported_configs_for_fwk", false]], "register_weight_hooks() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.register_weight_hooks", false]], "removableactivationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.RemovableActivationOperator", false]], "remove_init_from_model_input() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.remove_init_from_model_input", false]], "removetrainingnodesoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes)": [[65, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes.RemoveTrainingNodesOptimizer", false]], "removetrainingnodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes)": [[337, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes.RemoveTrainingNodesOptimizer", false]], "renamebatchnormoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm)": [[66, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm.RenameBatchNormOptimizer", false]], "renamebatchnormoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm)": [[338, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm.RenameBatchNormOptimizer", false]], "replace_forward() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.replace_forward", false]], "replace_pattern (neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.patternpair attribute)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair.replace_pattern", false]], "replacement_fn() (in module neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.replacement_fn", false]], "rerangequantizedconcat (class in neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat)": [[132, "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat.RerangeQuantizedConcat", false]], "rerangequantizedconcat (class in neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat)": [[384, "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat.RerangeQuantizedConcat", false]], "rescalekeraspretraintransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RescaleKerasPretrainTransform", false]], "rescaletftransform (class in neural_compressor.data.transforms.transform)": [[225, 
"neural_compressor.data.transforms.transform.RescaleTFTransform", false]], "rescaletransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RescaleTransform", false]], "reset_none_to_default() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.reset_none_to_default", false]], "reshape_in_channel_to_last() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.reshape_in_channel_to_last", false]], "reshape_scale_as_input() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.reshape_scale_as_input", false]], "reshape_scale_as_weight() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.reshape_scale_as_weight", false]], "resizemxnettransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeMXNetTransform", false]], "resizeoperator (class in neural_compressor.adaptor.ox_utils.operators.resize)": [[25, "neural_compressor.adaptor.ox_utils.operators.resize.ResizeOperator", false]], "resizepytorchtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizePytorchTransform", false]], "resizetftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeTFTransform", false]], "resizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeTransform", false]], "resizewithaspectratio (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ResizeWithAspectRatio", false]], "resizewithratio (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeWithRatio", false]], "retrainfreecriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.RetrainFreeCriterion", false]], "retrainfreepruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning", false]], "reverted_data_type() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.reverted_data_type", false]], "rmse (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.RMSE", false]], "roc (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ROC", false]], "roundingmode (class in neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.RoundingMode", false]], "rtn_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.rtn_entry", false]], "rtn_quantize() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.rtn_quantize", false]], "rtnconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.RTNConfig", false]], "rtnconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, 
"neural_compressor.transformers.utils.quantization_config.RtnConfig", false]], "rtnquantizer (class in neural_compressor.torch.algorithms.weight_only.rtn)": [[430, "neural_compressor.torch.algorithms.weight_only.rtn.RTNQuantizer", false]], "run_forward() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.run_forward", false]], "run_instance() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.run_instance", false]], "run_multi_instance_command() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.run_multi_instance_command", false]], "sample (neural_compressor.metric.metric.accuracy attribute)": [[234, "neural_compressor.metric.metric.Accuracy.sample", false]], "sample (neural_compressor.metric.metric.loss attribute)": [[234, "neural_compressor.metric.metric.Loss.sample", false]], "sampler (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.Sampler", false]], "sampler (class in neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.Sampler", false]], "save() (in module neural_compressor.compression.pruner)": [[170, "neural_compressor.compression.pruner.save", false]], "save() (in module neural_compressor.torch.algorithms.pt2e_quant.save_load)": [[408, "neural_compressor.torch.algorithms.pt2e_quant.save_load.save", false]], "save() (in module neural_compressor.torch.algorithms.static_quant.save_load)": [[415, "neural_compressor.torch.algorithms.static_quant.save_load.save", false]], "save() (in module neural_compressor.torch.algorithms.weight_only.save_load)": [[431, "neural_compressor.torch.algorithms.weight_only.save_load.save", false]], "save_config_mapping() (in module neural_compressor.common.utils.save_load)": [[160, "neural_compressor.common.utils.save_load.save_config_mapping", false]], "save_for_huggingface_upstream() (in module neural_compressor.utils.load_huggingface)": [[462, "neural_compressor.utils.load_huggingface.save_for_huggingface_upstream", false]], "save_protobuf() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.save_protobuf", false]], "saved_model_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.saved_model_session", false]], "saved_model_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.saved_model_session", false]], "scalepropagationtransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation)": [[85, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation.ScaleProPagationTransformer", false]], "scalepropagationtransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation)": [[355, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation.ScaleProPagationTransformer", false]], "scheduler (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.scheduler", false]], "scheduler 
(neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.scheduler", false]], "scores (neural_compressor.compression.pruner.criteria.blockmaskcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.BlockMaskCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.gradientcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.GradientCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.magnitudecriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.MagnitudeCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.pruningcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.PruningCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.retrainfreecriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.RetrainFreeCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.snipcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.SnipCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.snipmomentumcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.SnipMomentumCriterion.scores", false]], "scores (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.scores", false]], "scores (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.scores", false]], "scores (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.scores", false]], "scores (neural_compressor.compression.pruner.tf_criteria.magnitudecriterion attribute)": [[191, "neural_compressor.compression.pruner.tf_criteria.MagnitudeCriterion.scores", false]], "scores (neural_compressor.compression.pruner.tf_criteria.pruningcriterion attribute)": [[191, "neural_compressor.compression.pruner.tf_criteria.PruningCriterion.scores", false]], "search_clip() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.search_clip", false]], "search_pattern (neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.patternpair attribute)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair.search_pattern", false]], "searching_results 
(neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.searching_results", false]], "searching_results (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.searching_results", false]], "searching_results (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.searching_results", false]], "selfknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.SelfKnowledgeDistillationLoss", false]], "selfknowledgedistillationlossconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.SelfKnowledgeDistillationLossConfig", false]], "selfmhasearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher", false]], "seqtype (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.SeqType", false]], "sequentialsampler (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.SequentialSampler", false]], "sequentialsampler (class in neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.SequentialSampler", false]], "sequentialsampler (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.SequentialSampler", false]], "set_all_env_var() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.set_all_env_var", false]], "set_cores_for_instance() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.set_cores_for_instance", false]], "set_eager_execution() (in module neural_compressor.profiling.profiler.tensorflow_profiler.utils)": [[261, "neural_compressor.profiling.profiler.tensorflow_profiler.utils.set_eager_execution", false]], "set_env_var() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.set_env_var", false]], "set_module() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.set_module", false]], "set_module() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.set_module", false]], "set_module() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.set_module", false]], "set_module() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.set_module", false]], "set_name() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.set_name", false]], "set_random_seed() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_random_seed", false]], "set_random_seed() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_random_seed", false]], 
"set_resume_from() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_resume_from", false]], "set_resume_from() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_resume_from", false]], "set_tensorboard() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_tensorboard", false]], "set_tensorboard() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_tensorboard", false]], "set_workspace() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_workspace", false]], "set_workspace() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_workspace", false]], "shape (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.shape", false]], "shareqdqforitexypatternoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern)": [[94, "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern.ShareQDQForItexYPatternOptimizer", false]], "shareqdqforitexypatternoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern)": [[359, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern.ShareQDQForItexYPatternOptimizer", false]], "show_memory_info() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.show_memory_info", false]], "sigopttunestrategy (class in neural_compressor.contrib.strategy.sigopt)": [[198, "neural_compressor.contrib.strategy.sigopt.SigOptTuneStrategy", false]], "simple_inference() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.simple_inference", false]], "simple_inference() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.simple_inference", false]], "simple_progress_bar() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.simple_progress_bar", false]], "singleton() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.singleton", false]], "singleton() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.singleton", false]], "singleton() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.singleton", false]], "slim_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.slim_session", false]], "slim_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.slim_session", false]], "smooth_distribution() (in module neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.smooth_distribution", false]], "smooth_quant_entry() (in module neural_compressor.tensorflow.quantization.algorithm_entry)": [[301, "neural_compressor.tensorflow.quantization.algorithm_entry.smooth_quant_entry", false]], "smooth_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.smooth_quant_entry", 
false]], "smoothquant (class in neural_compressor.algorithm.smooth_quant)": [[149, "neural_compressor.algorithm.smooth_quant.SmoothQuant", false]], "smoothquant (class in neural_compressor.tensorflow.algorithms.smoother.core)": [[284, "neural_compressor.tensorflow.algorithms.smoother.core.SmoothQuant", false]], "smoothquantcalibration (class in neural_compressor.adaptor.tf_utils.smooth_quant_calibration)": [[125, "neural_compressor.adaptor.tf_utils.smooth_quant_calibration.SmoothQuantCalibration", false]], "smoothquantcalibration (class in neural_compressor.tensorflow.algorithms.smoother.calibration)": [[283, "neural_compressor.tensorflow.algorithms.smoother.calibration.SmoothQuantCalibration", false]], "smoothquantcalibrationllm (class in neural_compressor.adaptor.tf_utils.smooth_quant_calibration)": [[125, "neural_compressor.adaptor.tf_utils.smooth_quant_calibration.SmoothQuantCalibrationLLM", false]], "smoothquantcalibrationllm (class in neural_compressor.tensorflow.algorithms.smoother.calibration)": [[283, "neural_compressor.tensorflow.algorithms.smoother.calibration.SmoothQuantCalibrationLLM", false]], "smoothquantconfig (class in neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.SmoothQuantConfig", false]], "smoothquantconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.SmoothQuantConfig", false]], "smoothquantquantizer (class in neural_compressor.torch.algorithms.smooth_quant.smooth_quant)": [[412, "neural_compressor.torch.algorithms.smooth_quant.smooth_quant.SmoothQuantQuantizer", false]], "smoothquantsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.SmoothQuantSampler", false]], "smoothquantscaler (class in neural_compressor.adaptor.tf_utils.smooth_quant_scaler)": [[126, "neural_compressor.adaptor.tf_utils.smooth_quant_scaler.SmoothQuantScaler", false]], "smoothquantscaler (class in neural_compressor.tensorflow.algorithms.smoother.scaler)": [[286, "neural_compressor.tensorflow.algorithms.smoother.scaler.SmoothQuantScaler", false]], "smoothquantscalerllm (class in neural_compressor.adaptor.tf_utils.smooth_quant_scaler)": [[126, "neural_compressor.adaptor.tf_utils.smooth_quant_scaler.SmoothQuantScalerLLM", false]], "smoothquantscalerllm (class in neural_compressor.tensorflow.algorithms.smoother.scaler)": [[286, "neural_compressor.tensorflow.algorithms.smoother.scaler.SmoothQuantScalerLLM", false]], "snipcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.SnipCriterion", false]], "snipmomentumcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.SnipMomentumCriterion", false]], "sparsedummydataset (class in neural_compressor.data.datasets.dummy_dataset_v2)": [[213, "neural_compressor.data.datasets.dummy_dataset_v2.SparseDummyDataset", false]], "sparsegptpruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.SparseGPTPruning", false]], "split_shared_bias() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.split_shared_bias", false]], "splitoperator (class in neural_compressor.adaptor.ox_utils.operators.split)": [[26, "neural_compressor.adaptor.ox_utils.operators.split.SplitOperator", false]], "splitsharedinputoptimizer (class in 
neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input)": [[67, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input.SplitSharedInputOptimizer", false]], "splitsharedinputoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input)": [[339, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input.SplitSharedInputOptimizer", false]], "sqlinearwrapper (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.SQLinearWrapper", false]], "squadexample (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.SquadExample", false]], "squadf1 (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.SquadF1", false]], "start_step (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.start_step", false]], "start_step (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.start_step", false]], "start_step (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.start_step", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.static_graph", false]], "static_quant_entry() (in module neural_compressor.tensorflow.quantization.algorithm_entry)": [[301, "neural_compressor.tensorflow.quantization.algorithm_entry.static_quant_entry", false]], "static_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.static_quant_entry", false]], "static_quant_export() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.static_quant_export", false]], "staticquantconfig (class in neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.StaticQuantConfig", false]], "staticquantconfig (class in neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.StaticQuantConfig", false]], "staticquantconfig (class in 
neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.StaticQuantConfig", false]], "staticquantquantizer (class in neural_compressor.torch.algorithms.static_quant.static_quant)": [[416, "neural_compressor.torch.algorithms.static_quant.static_quant.StaticQuantQuantizer", false]], "statistics (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.Statistics", false]], "statistics (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.Statistics", false]], "str2array() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.str2array", false]], "strategy_registry() (in module neural_compressor.strategy.strategy)": [[274, "neural_compressor.strategy.strategy.strategy_registry", false]], "strip_equivalent_nodes() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.strip_equivalent_nodes", false]], "strip_equivalent_nodes() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.strip_equivalent_nodes", false]], "strip_unused_nodes() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.strip_unused_nodes", false]], "strip_unused_nodes() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.strip_unused_nodes", false]], "stripequivalentnodesoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes)": [[68, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes.StripEquivalentNodesOptimizer", false]], "stripequivalentnodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes)": [[340, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes.StripEquivalentNodesOptimizer", false]], "stripunusednodesoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes)": [[69, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes.StripUnusedNodesOptimizer", false]], "stripunusednodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes)": [[341, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes.StripUnusedNodesOptimizer", false]], "styletransferdataset (class in neural_compressor.data.datasets.style_transfer_dataset)": [[216, "neural_compressor.data.datasets.style_transfer_dataset.StyleTransferDataset", false]], "sum (neural_compressor.metric.metric.loss attribute)": [[234, "neural_compressor.metric.metric.Loss.sum", false]], "summary_benchmark() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.summary_benchmark", false]], "summary_latency_throughput() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.summary_latency_throughput", false]], "switchoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer)": [[70, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer.SwitchOptimizer", false]], "switchoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer)": [[342, 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer.SwitchOptimizer", false]], "symbol_re (neural_compressor.metric.bleu.unicoderegex attribute)": [[227, "neural_compressor.metric.bleu.UnicodeRegex.symbol_re", false]], "symbolic_trace() (in module neural_compressor.adaptor.torch_utils.symbolic_trace)": [[144, "neural_compressor.adaptor.torch_utils.symbolic_trace.symbolic_trace", false]], "target_layers (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.target_layers", false]], "target_layers (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.target_layers", false]], "target_op_lut (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.target_op_lut", false]], "target_sparsity (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.target_sparsity", false]], "target_sparsity (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.target_sparsity", false]], "target_sparsity (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.target_sparsity", false]], "target_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.target_sparsity_ratio", false]], "target_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.target_sparsity_ratio", false]], "target_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.target_sparsity_ratio", false]], "targets (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.targets", false]], "targetspace (class in neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.TargetSpace", false]], "tensorcollector (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.TensorCollector", false]], "tensorflow (class in neural_compressor.config)": [[195, "neural_compressor.config.TensorFlow", false]], "tensorflow (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.Tensorflow", false]], "tensorflow_itexadaptor (class in neural_compressor.adaptor.tensorflow)": [[32, "neural_compressor.adaptor.tensorflow.Tensorflow_ITEXAdaptor", false]], "tensorflow_itexadaptor (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.Tensorflow_ITEXAdaptor", false]], "tensorflowadam (class in neural_compressor.compression.distillation.optimizers)": [[165, 
"neural_compressor.compression.distillation.optimizers.TensorFlowAdam", false]], "tensorflowadamw (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.TensorFlowAdamW", false]], "tensorflowadaptor (class in neural_compressor.adaptor.tensorflow)": [[32, "neural_compressor.adaptor.tensorflow.TensorFlowAdaptor", false]], "tensorflowadaptor (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorFlowAdaptor", false]], "tensorflowbasemodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowBaseModel", false]], "tensorflowbasemodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowBaseModel", false]], "tensorflowbertdataloader (class in neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TensorflowBertDataLoader", false]], "tensorflowbertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.TensorflowBertDataset", false]], "tensorflowcheckpointmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowCheckpointModel", false]], "tensorflowcheckpointmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowCheckpointModel", false]], "tensorflowcifar10 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowCIFAR10", false]], "tensorflowcifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowCIFAR100", false]], "tensorflowcocomap (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowCOCOMAP", false]], "tensorflowconfig (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorFlowConfig", false]], "tensorflowconfigconverter (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorflowConfigConverter", false]], "tensorflowcriterions (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowCriterions", false]], "tensorflowcroptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowCropToBoundingBox", false]], "tensorflowcrossentropyloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorFlowCrossEntropyLoss", false]], "tensorflowdataloader (class in neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TensorflowDataLoader", false]], "tensorflowdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowDatasets", false]], "tensorflowfashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowFashionMNIST", false]], "tensorflowfilters 
(class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.TensorflowFilters", false]], "tensorflowglobalconfig (class in neural_compressor.tensorflow.utils.model)": [[389, "neural_compressor.tensorflow.utils.model.TensorflowGlobalConfig", false]], "tensorflowimagenetdataset (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.TensorflowImagenetDataset", false]], "tensorflowimagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.TensorflowImagenetRaw", false]], "tensorflowimagerecord (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowImageRecord", false]], "tensorflowknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowKnowledgeDistillationLoss", false]], "tensorflowknowledgedistillationlossexternal (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowKnowledgeDistillationLossExternal", false]], "tensorflowknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowKnowledgeDistillationLossWrapper", false]], "tensorflowllmmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowLLMModel", false]], "tensorflowllmmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowLLMModel", false]], "tensorflowmap (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowMAP", false]], "tensorflowmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowMetrics", false]], "tensorflowmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowMNIST", false]], "tensorflowmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowModel", false]], "tensorflowmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowModel", false]], "tensorflowmodelzoobertdataloader (class in neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TensorflowModelZooBertDataLoader", false]], "tensorflowmodelzoobertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.TensorflowModelZooBertDataset", false]], "tensorflowoptimizers (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.TensorflowOptimizers", false]], "tensorflowparserfactory (class in neural_compressor.profiling.parser.tensorflow_parser.factory)": [[252, "neural_compressor.profiling.parser.tensorflow_parser.factory.TensorFlowParserFactory", false]], "tensorflowprofilingparser (class in neural_compressor.profiling.parser.tensorflow_parser.parser)": [[253, "neural_compressor.profiling.parser.tensorflow_parser.parser.TensorFlowProfilingParser", false]], "tensorflowqatmodel (class in 
neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowQATModel", false]], "tensorflowqdqtoonnxqdqconverter (class in neural_compressor.adaptor.tf_utils.tf2onnx_converter)": [[127, "neural_compressor.adaptor.tf_utils.tf2onnx_converter.TensorflowQDQToOnnxQDQConverter", false]], "tensorflowquery (class in neural_compressor.adaptor.tensorflow)": [[32, "neural_compressor.adaptor.tensorflow.TensorflowQuery", false]], "tensorflowquery (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorflowQuery", false]], "tensorflowrandomhorizontalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowRandomHorizontalFlip", false]], "tensorflowrandomverticalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowRandomVerticalFlip", false]], "tensorflowresizecropimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.TensorflowResizeCropImagenetTransform", false]], "tensorflowresizewithratio (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowResizeWithRatio", false]], "tensorflowsavedmodelmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowSavedModelModel", false]], "tensorflowsavedmodelmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowSavedModelModel", false]], "tensorflowsgd (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.TensorFlowSGD", false]], "tensorflowshiftrescale (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.TensorflowShiftRescale", false]], "tensorflowsparsecategoricalcrossentropy (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorFlowSparseCategoricalCrossentropy", false]], "tensorflowtfrecorddataset (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowTFRecordDataset", false]], "tensorflowtopk (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowTopK", false]], "tensorflowtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowTransform", false]], "tensorflowtransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowTransforms", false]], "tensorflowtranspose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowTranspose", false]], "tensorflowtransposelastchannel (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.TensorflowTransposeLastChannel", false]], "tensorflowvocmap (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowVOCMAP", false]], "tensorflowvocrecord (class in neural_compressor.data.datasets.dataset)": [[211, 
"neural_compressor.data.datasets.dataset.TensorflowVOCRecord", false]], "tensorflowwrapfunction (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowWrapFunction", false]], "teq_quantize_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.teq_quantize_entry", false]], "teqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.TEQConfig", false]], "teqconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.TeqConfig", false]], "teqlinearfakequant (class in neural_compressor.adaptor.torch_utils.model_wrapper)": [[142, "neural_compressor.adaptor.torch_utils.model_wrapper.TEQLinearFakeQuant", false]], "teqlinearfakequant (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.TEQLinearFakeQuant", false]], "tequantizer (class in neural_compressor.torch.algorithms.weight_only.teq)": [[432, "neural_compressor.torch.algorithms.weight_only.teq.TEQuantizer", false]], "tf2onnxconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.TF2ONNXConfig", false]], "tf_to_fp32_onnx() (in module neural_compressor.utils.export.tf2onnx)": [[458, "neural_compressor.utils.export.tf2onnx.tf_to_fp32_onnx", false]], "tf_to_int8_onnx() (in module neural_compressor.utils.export.tf2onnx)": [[458, "neural_compressor.utils.export.tf2onnx.tf_to_int8_onnx", false]], "tfdatadataloader (class in neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TFDataDataLoader", false]], "tfmodelzoocollecttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TFModelZooCollectTransform", false]], "tfslimnetsfactory (class in neural_compressor.model.nets_factory)": [[241, "neural_compressor.model.nets_factory.TFSlimNetsFactory", false]], "tfslimnetsfactory (class in neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.TFSlimNetsFactory", false]], "tfsquadv1modelzooposttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TFSquadV1ModelZooPostTransform", false]], "tfsquadv1posttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TFSquadV1PostTransform", false]], "time_limit() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.time_limit", false]], "to_numpy() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.to_numpy", false]], "toarray (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ToArray", false]], "tondarraytransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ToNDArrayTransform", false]], "torch2onnxconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.Torch2ONNXConfig", false]], "torch_to_fp32_onnx() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.torch_to_fp32_onnx", false]], "torch_to_int8_onnx() (in module neural_compressor.utils.export.torch2onnx)": 
[[459, "neural_compressor.utils.export.torch2onnx.torch_to_int8_onnx", false]], "torchbaseconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.TorchBaseConfig", false]], "torchsmoothquant (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.TorchSmoothQuant", false]], "tpetunestrategy (class in neural_compressor.contrib.strategy.tpe)": [[199, "neural_compressor.contrib.strategy.tpe.TpeTuneStrategy", false]], "trace_and_fuse_sub_graph() (in module neural_compressor.adaptor.torch_utils.symbolic_trace)": [[144, "neural_compressor.adaptor.torch_utils.symbolic_trace.trace_and_fuse_sub_graph", false]], "trace_gptq_target_blocks() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.trace_gptq_target_blocks", false]], "trainableequivalenttransformation (class in neural_compressor.torch.algorithms.weight_only.teq)": [[432, "neural_compressor.torch.algorithms.weight_only.teq.TrainableEquivalentTransformation", false]], "transform_registry() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.transform_registry", false]], "transformation() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.transformation", false]], "transformerbasedmodelblockpatterndetector (class in neural_compressor.adaptor.torch_utils.pattern_detector)": [[143, "neural_compressor.adaptor.torch_utils.pattern_detector.TransformerBasedModelBlockPatternDetector", false]], "transformerbasedmodelblockpatterndetector (class in neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.TransformerBasedModelBlockPatternDetector", false]], "transforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TRANSFORMS", false], [225, "neural_compressor.data.transforms.transform.Transforms", false]], "transpose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.Transpose", false]], "trt_env_setup() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.trt_env_setup", false]], "try_loading_keras() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.try_loading_keras", false]], "try_loading_keras() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.try_loading_keras", false]], "tunestrategy (class in neural_compressor.strategy.strategy)": [[274, "neural_compressor.strategy.strategy.TuneStrategy", false]], "tunestrategymeta (class in neural_compressor.strategy.strategy)": [[274, "neural_compressor.strategy.strategy.TuneStrategyMeta", false]], "tuningconfig (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.TuningConfig", false]], "tuningcriterion (class in neural_compressor.config)": [[195, "neural_compressor.config.TuningCriterion", false]], "tuningitem (class in neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.TuningItem", false]], "tuninglogger (class in neural_compressor.common.utils.logger)": [[159, 
"neural_compressor.common.utils.logger.TuningLogger", false]], "tuningmonitor (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.TuningMonitor", false]], "tuningorder (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.TuningOrder", false]], "tuningparam (class in neural_compressor.common.tuning_param)": [[156, "neural_compressor.common.tuning_param.TuningParam", false]], "tuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.TuningSampler", false]], "tuningspace (class in neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.TuningSpace", false]], "unarydirect8bitoperator (class in neural_compressor.adaptor.ox_utils.operators.unary_op)": [[27, "neural_compressor.adaptor.ox_utils.operators.unary_op.UnaryDirect8BitOperator", false]], "unaryoperator (class in neural_compressor.adaptor.ox_utils.operators.unary_op)": [[27, "neural_compressor.adaptor.ox_utils.operators.unary_op.UnaryOperator", false]], "unicoderegex (class in neural_compressor.metric.bleu)": [[227, "neural_compressor.metric.bleu.UnicodeRegex", false]], "unpackedweightonlylinearparams (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.UnpackedWeightOnlyLinearParams", false]], "unpicklingerror": [[138, "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle.UnpicklingError", false], [397, "neural_compressor.torch.algorithms.layer_wise.modified_pickle.UnpicklingError", false]], "update_module() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.update_module", false]], "update_module() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.update_module", false]], "update_params() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.update_params", false]], "update_sq_scale() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.update_sq_scale", false]], "update_sq_scale() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.update_sq_scale", false]], "valid_keras_format() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.valid_keras_format", false]], "valid_reshape_inputs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.valid_reshape_inputs", false]], "valid_reshape_inputs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.valid_reshape_inputs", false]], "valid_reshape_inputs() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.valid_reshape_inputs", false]], "valid_reshape_inputs() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.valid_reshape_inputs", false]], "validate_and_inference_input_output() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.validate_and_inference_input_output", false]], "validate_and_inference_input_output() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.validate_and_inference_input_output", false]], "validate_graph_node() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.validate_graph_node", false]], "validate_graph_node() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.validate_graph_node", false]], "valueinfo (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.ValueInfo", false]], "values_from_const() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.values_from_const", false]], "values_from_const() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.values_from_const", false]], "values_from_const() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.values_from_const", false]], "values_from_const() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.values_from_const", false]], "values_from_const() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.values_from_const", false]], "values_from_const() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.values_from_const", false]], "version1_eq_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_eq_version2", false]], "version1_eq_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_eq_version2", false]], "version1_eq_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_eq_version2", false]], "version1_gt_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_gt_version2", false]], "version1_gt_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_gt_version2", false]], "version1_gt_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_gt_version2", false]], "version1_gte_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_gte_version2", false]], "version1_gte_version2() (in module 
neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_gte_version2", false]], "version1_gte_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_gte_version2", false]], "version1_lt_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_lt_version2", false]], "version1_lt_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_lt_version2", false]], "version1_lt_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_lt_version2", false]], "version1_lte_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_lte_version2", false]], "version1_lte_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_lte_version2", false]], "version1_lte_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_lte_version2", false]], "w8a8pt2equantizer (class in neural_compressor.torch.algorithms.pt2e_quant.core)": [[405, "neural_compressor.torch.algorithms.pt2e_quant.core.W8A8PT2EQuantizer", false]], "warn() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.warn", false]], "warning() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.warning", false]], "weightcorrection (class in neural_compressor.algorithm.weight_correction)": [[150, "neural_compressor.algorithm.weight_correction.WeightCorrection", false]], "weightonlylinear (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.WeightOnlyLinear", false]], "weightonlyquantsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.WeightOnlyQuantSampler", false]], "weightpruningconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.WeightPruningConfig", false]], "weightsdetails (class in neural_compressor.utils.weights_details)": [[467, "neural_compressor.utils.weights_details.WeightsDetails", false]], "weightsstatistics (class in neural_compressor.utils.weights_details)": [[467, "neural_compressor.utils.weights_details.WeightsStatistics", false]], "whitespace_tokenize() (in module neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.whitespace_tokenize", false]], "woqmodelloader (class in neural_compressor.torch.algorithms.weight_only.save_load)": [[431, "neural_compressor.torch.algorithms.weight_only.save_load.WOQModelLoader", false]], "wordpiecetokenizer (class in neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.WordpieceTokenizer", false]], "wrapmxnetmetric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.WrapMXNetMetric", false]], "wraponnxrtmetric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.WrapONNXRTMetric", false]], "wrapperlayer (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.WrapperLayer", false]], "wrappytorchmetric (class in neural_compressor.metric.metric)": [[234, 
"neural_compressor.metric.metric.WrapPyTorchMetric", false]], "write_graph() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.write_graph", false]], "write_graph() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.write_graph", false]], "xpu_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.XPU_Accelerator", false]]}, "objects": {"": [[226, 0, 0, "-", "neural_compressor"]], "neural_compressor": [[148, 0, 0, "-", "algorithm"], [151, 0, 0, "-", "benchmark"], [155, 0, 0, "-", "common"], [195, 0, 0, "-", "config"], [196, 0, 0, "-", "contrib"], [220, 0, 0, "-", "data"], [233, 0, 0, "-", "metric"], [235, 0, 0, "-", "mix_precision"], [237, 0, 0, "-", "model"], [245, 0, 0, "-", "objective"], [246, 0, 0, "-", "profiling"], [262, 0, 0, "-", "quantization"], [270, 0, 0, "-", "strategy"], [290, 0, 0, "-", "tensorflow"], [436, 0, 0, "-", "torch"], [449, 0, 0, "-", "training"], [460, 0, 0, "-", "utils"], [468, 0, 0, "-", "version"]], "neural_compressor.adaptor": [[0, 0, 0, "-", "mxnet_utils"], [4, 0, 0, "-", "ox_utils"], [32, 0, 0, "-", "tensorflow"], [96, 0, 0, "-", "tf_utils"], [136, 0, 0, "-", "torch_utils"]], "neural_compressor.adaptor.mxnet_utils": [[1, 0, 0, "-", "util"]], "neural_compressor.adaptor.mxnet_utils.util": [[1, 1, 1, "", "CalibCollector"], [1, 1, 1, "", "CalibData"], [1, 1, 1, "", "CollectorBase"], [1, 1, 1, "", "DataIterLoader"], [1, 1, 1, "", "DataLoaderWrap"], [1, 1, 1, "", "NameCollector"], [1, 1, 1, "", "OpType"], [1, 1, 1, "", "TensorCollector"], [1, 2, 1, "", "amp_convert"], [1, 2, 1, "", "calib_model"], [1, 2, 1, "", "check_mx_version"], [1, 2, 1, "", "combine_capabilities"], [1, 2, 1, "", "create_data_example"], [1, 2, 1, "", "distribute_calib_tensors"], [1, 2, 1, "", "ensure_list"], [1, 2, 1, "", "fuse"], [1, 2, 1, "", "get_framework_name"], [1, 2, 1, "", "is_model_quantized"], [1, 2, 1, "", "isiterable"], [1, 2, 1, "", "make_module"], [1, 2, 1, "", "make_nc_model"], [1, 2, 1, "", "make_symbol_block"], [1, 2, 1, "", "ndarray_to_device"], [1, 2, 1, "", "parse_tune_config"], [1, 2, 1, "", "prepare_dataloader"], [1, 2, 1, "", "prepare_model"], [1, 2, 1, "", "prepare_model_data"], [1, 2, 1, "", "quantize_sym_model"], [1, 2, 1, "", "query_quantizable_nodes"], [1, 2, 1, "", "run_forward"]], "neural_compressor.adaptor.ox_utils": [[2, 0, 0, "-", "calibration"], [3, 0, 0, "-", "calibrator"], [16, 0, 0, "-", "operators"], [28, 0, 0, "-", "quantizer"], [29, 0, 0, "-", "smooth_quant"], [30, 0, 0, "-", "util"], [31, 0, 0, "-", "weight_only"]], "neural_compressor.adaptor.ox_utils.calibration": [[2, 1, 1, "", "ONNXRTAugment"]], "neural_compressor.adaptor.ox_utils.calibrator": [[3, 1, 1, "", "CalibratorBase"], [3, 1, 1, "", "HistogramCollector"], [3, 1, 1, "", "KLCalibrator"], [3, 1, 1, "", "MinMaxCalibrator"], [3, 1, 1, "", "PercentileCalibrator"], [3, 2, 1, "", "calib_registry"], [3, 2, 1, "", "smooth_distribution"]], "neural_compressor.adaptor.ox_utils.operators": [[5, 0, 0, "-", "activation"], [6, 0, 0, "-", "argmax"], [7, 0, 0, "-", "attention"], [8, 0, 0, "-", "binary_op"], [9, 0, 0, "-", "concat"], [10, 0, 0, "-", "conv"], [11, 0, 0, "-", "direct_q8"], [12, 0, 0, "-", "embed_layernorm"], [13, 0, 0, "-", "gather"], [14, 0, 0, "-", "gavgpool"], [15, 0, 0, "-", "gemm"], [17, 0, 0, "-", "lstm"], [18, 0, 0, "-", "matmul"], [19, 0, 0, "-", "maxpool"], [20, 0, 0, "-", "norm"], [21, 
0, 0, "-", "ops"], [22, 0, 0, "-", "pad"], [23, 0, 0, "-", "pooling"], [24, 0, 0, "-", "reduce"], [25, 0, 0, "-", "resize"], [26, 0, 0, "-", "split"], [27, 0, 0, "-", "unary_op"]], "neural_compressor.adaptor.ox_utils.operators.activation": [[5, 1, 1, "", "ActivationOperator"], [5, 1, 1, "", "Float16ActivationOperator"], [5, 1, 1, "", "QActivationOperator"], [5, 1, 1, "", "RemovableActivationOperator"]], "neural_compressor.adaptor.ox_utils.operators.argmax": [[6, 1, 1, "", "ArgMaxOperator"], [6, 1, 1, "", "QArgMaxOperator"]], "neural_compressor.adaptor.ox_utils.operators.attention": [[7, 1, 1, "", "AttentionOperator"], [7, 1, 1, "", "QAttentionOperator"]], "neural_compressor.adaptor.ox_utils.operators.binary_op": [[8, 1, 1, "", "BinaryDirect8BitOperator"], [8, 1, 1, "", "BinaryOperator"], [8, 1, 1, "", "Float16BinaryOperator"], [8, 1, 1, "", "QBinaryOperator"]], "neural_compressor.adaptor.ox_utils.operators.concat": [[9, 1, 1, "", "ConcatOperator"], [9, 1, 1, "", "QConcatOperator"]], "neural_compressor.adaptor.ox_utils.operators.conv": [[10, 1, 1, "", "ConvOperator"], [10, 1, 1, "", "QConvOperator"]], "neural_compressor.adaptor.ox_utils.operators.direct_q8": [[11, 1, 1, "", "Direct8BitOperator"], [11, 1, 1, "", "QDirectOperator"]], "neural_compressor.adaptor.ox_utils.operators.embed_layernorm": [[12, 1, 1, "", "EmbedLayerNormalizationOperator"], [12, 1, 1, "", "QEmbedLayerNormalizationOperator"]], "neural_compressor.adaptor.ox_utils.operators.gather": [[13, 1, 1, "", "GatherOperator"], [13, 1, 1, "", "QGatherOperator"]], "neural_compressor.adaptor.ox_utils.operators.gavgpool": [[14, 1, 1, "", "GlobalAveragePoolOperator"], [14, 1, 1, "", "QGlobalAveragePoolOperator"]], "neural_compressor.adaptor.ox_utils.operators.gemm": [[15, 1, 1, "", "GemmOperator"], [15, 1, 1, "", "QGemmOperator"]], "neural_compressor.adaptor.ox_utils.operators.lstm": [[17, 1, 1, "", "LSTMOperator"]], "neural_compressor.adaptor.ox_utils.operators.matmul": [[18, 1, 1, "", "FusedMatMulOperator"], [18, 1, 1, "", "MatMulOperator"], [18, 1, 1, "", "QMatMulOperator"]], "neural_compressor.adaptor.ox_utils.operators.maxpool": [[19, 1, 1, "", "MaxPoolOperator"], [19, 1, 1, "", "QMaxPoolOperator"]], "neural_compressor.adaptor.ox_utils.operators.norm": [[20, 1, 1, "", "BatchNormalizationOperator"], [20, 1, 1, "", "NormalizationOperator"]], "neural_compressor.adaptor.ox_utils.operators.ops": [[21, 1, 1, "", "Operator"], [21, 1, 1, "", "QOperator"], [21, 2, 1, "", "op_registry"], [21, 2, 1, "", "qop_registry"]], "neural_compressor.adaptor.ox_utils.operators.pad": [[22, 1, 1, "", "PadOperator"], [22, 1, 1, "", "QPadOperator"]], "neural_compressor.adaptor.ox_utils.operators.pooling": [[23, 1, 1, "", "PoolOperator"], [23, 1, 1, "", "QPoolOperator"]], "neural_compressor.adaptor.ox_utils.operators.reduce": [[24, 1, 1, "", "ReduceMinMaxOperator"], [24, 1, 1, "", "ReduceOperator"]], "neural_compressor.adaptor.ox_utils.operators.resize": [[25, 1, 1, "", "QResizeOperator"], [25, 1, 1, "", "ResizeOperator"]], "neural_compressor.adaptor.ox_utils.operators.split": [[26, 1, 1, "", "QSplitOperator"], [26, 1, 1, "", "SplitOperator"]], "neural_compressor.adaptor.ox_utils.operators.unary_op": [[27, 1, 1, "", "UnaryDirect8BitOperator"], [27, 1, 1, "", "UnaryOperator"]], "neural_compressor.adaptor.ox_utils.quantizer": [[28, 1, 1, "", "Quantizer"]], "neural_compressor.adaptor.ox_utils.smooth_quant": [[29, 1, 1, "", "ORTSmoothQuant"], [29, 2, 1, "", "get_quant_dequant_output"], [29, 2, 1, "", "make_sub_graph"], [29, 2, 1, "", "quant_dequant_data"]], 
"neural_compressor.adaptor.ox_utils.util": [[30, 1, 1, "", "QuantFormat"], [30, 1, 1, "", "QuantType"], [30, 1, 1, "", "QuantizationMode"], [30, 1, 1, "", "QuantizedInitializer"], [30, 1, 1, "", "QuantizedValue"], [30, 1, 1, "", "QuantizedValueType"], [30, 1, 1, "", "ValueInfo"], [30, 2, 1, "", "attribute_to_kwarg"], [30, 2, 1, "", "calculate_scale_zp"], [30, 2, 1, "", "cast_tensor"], [30, 2, 1, "", "collate_preds"], [30, 2, 1, "", "dequantize_data"], [30, 2, 1, "", "dequantize_data_with_scale_zero"], [30, 2, 1, "", "dtype_to_name"], [30, 2, 1, "", "find_by_name"], [30, 2, 1, "", "float_to_bfloat16"], [30, 2, 1, "", "float_to_float16"], [30, 2, 1, "", "get_node_original_name"], [30, 2, 1, "", "infer_shapes"], [30, 2, 1, "", "is_B_transposed"], [30, 2, 1, "", "make_dquant_node"], [30, 2, 1, "", "make_quant_node"], [30, 2, 1, "", "quantize_data"], [30, 2, 1, "", "quantize_data_per_channel"], [30, 2, 1, "", "quantize_data_with_scale_zero"], [30, 2, 1, "", "quantize_nparray"], [30, 2, 1, "", "remove_init_from_model_input"], [30, 2, 1, "", "simple_progress_bar"], [30, 2, 1, "", "split_shared_bias"], [30, 2, 1, "", "to_numpy"], [30, 2, 1, "", "trt_env_setup"]], "neural_compressor.adaptor.ox_utils.weight_only": [[31, 2, 1, "", "apply_awq_clip"], [31, 2, 1, "", "apply_awq_scale"], [31, 2, 1, "", "awq_quantize"], [31, 2, 1, "", "get_blob_size"], [31, 2, 1, "", "get_weight_scale"], [31, 2, 1, "", "gptq"], [31, 2, 1, "", "gptq_quantize"], [31, 2, 1, "", "make_matmul_weight_only_node"], [31, 2, 1, "", "pad_tensor"], [31, 2, 1, "", "prepare_inputs"], [31, 2, 1, "", "qdq_tensor"], [31, 2, 1, "", "quant_tensor"], [31, 2, 1, "", "rtn_quantize"]], "neural_compressor.adaptor.tensorflow": [[32, 1, 1, "", "TensorFlowAdaptor"], [32, 1, 1, "", "TensorflowQuery"], [32, 1, 1, "", "Tensorflow_ITEXAdaptor"]], "neural_compressor.adaptor.tf_utils": [[33, 0, 0, "-", "graph_converter"], [34, 0, 0, "-", "graph_converter_without_calib"], [72, 0, 0, "-", "graph_rewriter"], [95, 0, 0, "-", "graph_util"], [97, 0, 0, "-", "quantize_graph"], [124, 0, 0, "-", "quantize_graph_common"], [125, 0, 0, "-", "smooth_quant_calibration"], [126, 0, 0, "-", "smooth_quant_scaler"], [127, 0, 0, "-", "tf2onnx_converter"], [130, 0, 0, "-", "transform_graph"], [133, 0, 0, "-", "util"]], "neural_compressor.adaptor.tf_utils.graph_converter": [[33, 1, 1, "", "GraphConverter"]], "neural_compressor.adaptor.tf_utils.graph_converter_without_calib": [[34, 1, 1, "", "GraphConverterWithoutCalib"]], "neural_compressor.adaptor.tf_utils.graph_rewriter": [[37, 0, 0, "-", "bf16"], [61, 0, 0, "-", "generic"], [71, 0, 0, "-", "graph_base"], [80, 0, 0, "-", "int8"], [86, 0, 0, "-", "onnx"], [91, 0, 0, "-", "qdq"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16": [[35, 0, 0, "-", "bf16_convert"], [36, 0, 0, "-", "dequantize_cast_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert": [[35, 1, 1, "", "BF16Convert"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[36, 1, 1, "", "DequantizeCastOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic": [[38, 0, 0, "-", "convert_add_to_biasadd"], [39, 0, 0, "-", "convert_layout"], [40, 0, 0, "-", "convert_leakyrelu"], [41, 0, 0, "-", "convert_nan_to_random"], [42, 0, 0, "-", "convert_placeholder_to_const"], [43, 0, 0, "-", "dilated_contraction"], [44, 0, 0, "-", "dummy_biasadd"], [45, 0, 0, "-", "expanddims_optimizer"], [46, 0, 0, "-", "fetch_weight_from_reshape"], [47, 0, 0, "-", "fold_batch_norm"], [48, 0, 0, "-", 
"fold_constant"], [49, 0, 0, "-", "fuse_biasadd_add"], [50, 0, 0, "-", "fuse_column_wise_mul"], [51, 0, 0, "-", "fuse_conv_with_math"], [52, 0, 0, "-", "fuse_decomposed_bn"], [53, 0, 0, "-", "fuse_decomposed_in"], [54, 0, 0, "-", "fuse_gelu"], [55, 0, 0, "-", "fuse_layer_norm"], [56, 0, 0, "-", "fuse_pad_with_conv"], [57, 0, 0, "-", "fuse_pad_with_fp32_conv"], [58, 0, 0, "-", "fuse_reshape_transpose"], [59, 0, 0, "-", "graph_cse_optimizer"], [60, 0, 0, "-", "grappler_pass"], [62, 0, 0, "-", "insert_print_node"], [63, 0, 0, "-", "move_squeeze_after_relu"], [64, 0, 0, "-", "pre_optimize"], [65, 0, 0, "-", "remove_training_nodes"], [66, 0, 0, "-", "rename_batch_norm"], [67, 0, 0, "-", "split_shared_input"], [68, 0, 0, "-", "strip_equivalent_nodes"], [69, 0, 0, "-", "strip_unused_nodes"], [70, 0, 0, "-", "switch_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd": [[38, 1, 1, "", "ConvertAddToBiasAddOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout": [[39, 1, 1, "", "ConvertLayoutOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu": [[40, 1, 1, "", "ConvertLeakyReluOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random": [[41, 1, 1, "", "ConvertNanToRandom"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const": [[42, 1, 1, "", "ConvertPlaceholderToConst"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction": [[43, 1, 1, "", "DilatedContraction"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd": [[44, 1, 1, "", "InjectDummyBiasAddOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer": [[45, 1, 1, "", "ExpandDimsOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape": [[46, 1, 1, "", "FetchWeightFromReshapeOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm": [[47, 1, 1, "", "FoldBatchNormNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant": [[48, 1, 1, "", "GraphFoldConstantOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add": [[49, 1, 1, "", "FuseBiasAddAndAddOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul": [[50, 1, 1, "", "FuseColumnWiseMulOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math": [[51, 1, 1, "", "FuseConvWithMathOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn": [[52, 1, 1, "", "FuseDecomposedBNOptimizer"], [52, 2, 1, "", "bypass_reshape"], [52, 2, 1, "", "get_const_dim_count"], [52, 2, 1, "", "node_from_map"], [52, 2, 1, "", "node_name_from_input"], [52, 2, 1, "", "valid_reshape_inputs"], [52, 2, 1, "", "values_from_const"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in": [[53, 1, 1, "", "FuseDecomposedINOptimizer"], [53, 2, 1, "", "bypass_reshape"], [53, 2, 1, "", "get_const_dim_count"], [53, 2, 1, "", "node_from_map"], [53, 2, 1, "", "node_name_from_input"], [53, 2, 1, "", "valid_reshape_inputs"], [53, 2, 1, "", "values_from_const"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu": [[54, 1, 1, "", "FuseGeluOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm": [[55, 1, 1, "", "FuseLayerNormOptimizer"], [55, 2, 
1, "", "node_from_map"], [55, 2, 1, "", "node_name_from_input"], [55, 2, 1, "", "values_from_const"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv": [[56, 1, 1, "", "FusePadWithConv2DOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[57, 1, 1, "", "FusePadWithFP32Conv2DOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose": [[58, 1, 1, "", "FuseTransposeReshapeOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer": [[59, 1, 1, "", "GraphCseOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass": [[60, 1, 1, "", "GrapplerOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node": [[62, 1, 1, "", "InsertPrintMinMaxNode"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu": [[63, 1, 1, "", "MoveSqueezeAfterReluOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize": [[64, 1, 1, "", "PreOptimization"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes": [[65, 1, 1, "", "RemoveTrainingNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm": [[66, 1, 1, "", "RenameBatchNormOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input": [[67, 1, 1, "", "SplitSharedInputOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes": [[68, 1, 1, "", "StripEquivalentNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes": [[69, 1, 1, "", "StripUnusedNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer": [[70, 1, 1, "", "SwitchOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base": [[71, 1, 1, "", "GraphRewriterBase"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8": [[73, 0, 0, "-", "freeze_fake_quant"], [74, 0, 0, "-", "freeze_value"], [75, 0, 0, "-", "freeze_value_without_calib"], [76, 0, 0, "-", "fuse_conv_redundant_dequantize"], [77, 0, 0, "-", "fuse_conv_requantize"], [78, 0, 0, "-", "fuse_matmul_redundant_dequantize"], [79, 0, 0, "-", "fuse_matmul_requantize"], [81, 0, 0, "-", "meta_op_optimizer"], [82, 0, 0, "-", "post_hostconst_converter"], [83, 0, 0, "-", "post_quantized_op_cse"], [84, 0, 0, "-", "rnn_convert"], [85, 0, 0, "-", "scale_propagation"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant": [[73, 1, 1, "", "FreezeFakeQuantOpOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value": [[74, 1, 1, "", "FreezeValueTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib": [[75, 1, 1, "", "FreezeValueWithoutCalibTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[76, 1, 1, "", "FuseConvRedundantDequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize": [[77, 1, 1, "", "FuseConvRequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[78, 1, 1, "", "FuseMatMulRedundantDequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize": [[79, 1, 1, "", "FuseMatMulRequantizeDequantizeNewAPITransformer"], [79, 1, 1, "", "FuseMatMulRequantizeDequantizeTransformer"], 
[79, 1, 1, "", "FuseMatMulRequantizeNewAPITransformer"], [79, 1, 1, "", "FuseMatMulRequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer": [[81, 1, 1, "", "MetaInfoChangingMemOpOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter": [[82, 1, 1, "", "PostHostConstConverter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse": [[83, 1, 1, "", "PostCseOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert": [[84, 1, 1, "", "QuantizedRNNConverter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation": [[85, 1, 1, "", "ScaleProPagationTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx": [[87, 0, 0, "-", "onnx_graph"], [88, 0, 0, "-", "onnx_node"], [89, 0, 0, "-", "onnx_schema"], [90, 0, 0, "-", "tf2onnx_utils"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph": [[87, 1, 1, "", "OnnxGraph"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node": [[88, 1, 1, "", "OnnxNode"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema": [[89, 1, 1, "", "OnnxOpSchema"], [89, 2, 1, "", "get_max_supported_opset_version"], [89, 2, 1, "", "get_schema"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils": [[90, 1, 1, "", "SeqType"], [90, 2, 1, "", "add_port_to_name"], [90, 2, 1, "", "are_shapes_equal"], [90, 2, 1, "", "assert_error"], [90, 2, 1, "", "compute_const_folding_using_tf"], [90, 2, 1, "", "convert_tensorflow_tensor_to_onnx"], [90, 2, 1, "", "find_opset"], [90, 2, 1, "", "get_index_from_strided_slice_of_shape"], [90, 2, 1, "", "get_subgraphs_from_onnx"], [90, 2, 1, "", "get_tensorflow_node_attr"], [90, 2, 1, "", "get_tensorflow_node_shape_attr"], [90, 2, 1, "", "get_tensorflow_tensor_data"], [90, 2, 1, "", "get_tensorflow_tensor_shape"], [90, 2, 1, "", "infer_onnx_shape_dtype"], [90, 2, 1, "", "initialize_name_counter"], [90, 2, 1, "", "is_list_or_tuple"], [90, 2, 1, "", "is_onnx_domain"], [90, 2, 1, "", "make_onnx_inputs_outputs"], [90, 2, 1, "", "make_onnx_shape"], [90, 2, 1, "", "map_numpy_to_onnx_dtype"], [90, 2, 1, "", "map_onnx_to_numpy_type"], [90, 2, 1, "", "map_tensorflow_dtype"], [90, 2, 1, "", "read_tensorflow_node_attrs"], [90, 2, 1, "", "save_protobuf"], [90, 2, 1, "", "set_name"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq": [[92, 0, 0, "-", "insert_qdq_pattern"], [93, 0, 0, "-", "merge_duplicated_qdq"], [94, 0, 0, "-", "share_qdq_y_pattern"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern": [[92, 1, 1, "", "GenerateGraphWithQDQPattern"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq": [[93, 1, 1, "", "MergeDuplicatedQDQOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern": [[94, 1, 1, "", "ShareQDQForItexYPatternOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_util": [[95, 1, 1, "", "GraphAnalyzer"], [95, 1, 1, "", "GraphRewriterHelper"]], "neural_compressor.adaptor.tf_utils.quantize_graph": [[99, 0, 0, "-", "qat"], [115, 0, 0, "-", "qdq"], [117, 0, 0, "-", "quantize_graph_base"], [118, 0, 0, "-", "quantize_graph_bn"], [119, 0, 0, "-", "quantize_graph_concatv2"], [120, 0, 0, "-", "quantize_graph_conv"], [121, 0, 0, "-", "quantize_graph_for_intel_cpu"], [122, 0, 0, "-", "quantize_graph_matmul"], [123, 0, 0, "-", "quantize_graph_pooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat": [[98, 0, 0, "-", 
"fake_quantize"], [100, 0, 0, "-", "quantize_config"], [101, 0, 0, "-", "quantize_helper"], [102, 0, 0, "-", "quantize_layers"], [107, 0, 0, "-", "quantize_wrapper"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize": [[98, 1, 1, "", "FakeQuantize"], [98, 1, 1, "", "FakeQuantizeBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config": [[100, 1, 1, "", "QuantizeConfig"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper": [[101, 2, 1, "", "init_quantize_config"], [101, 2, 1, "", "qat_clone_function"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers": [[103, 0, 0, "-", "optimize_layer"], [104, 0, 0, "-", "quantize_layer_add"], [105, 0, 0, "-", "quantize_layer_base"], [106, 0, 0, "-", "quantize_layer_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer": [[103, 2, 1, "", "config_quantizable_layers"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add": [[104, 1, 1, "", "QuantizeLayerAdd"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base": [[105, 1, 1, "", "QuantizeLayerBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn": [[106, 1, 1, "", "QuantizeLayerBatchNormalization"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper": [[107, 1, 1, "", "QuantizeWrapper"], [107, 1, 1, "", "QuantizeWrapperBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq": [[108, 0, 0, "-", "fuse_qdq_bn"], [109, 0, 0, "-", "fuse_qdq_concatv2"], [110, 0, 0, "-", "fuse_qdq_conv"], [111, 0, 0, "-", "fuse_qdq_deconv"], [112, 0, 0, "-", "fuse_qdq_in"], [113, 0, 0, "-", "fuse_qdq_matmul"], [114, 0, 0, "-", "fuse_qdq_pooling"], [116, 0, 0, "-", "optimize_qdq"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn": [[108, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2": [[109, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv": [[110, 1, 1, "", "FuseNodeStartWithConv2d"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv": [[111, 1, 1, "", "FuseNodeStartWithDeconv2d"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in": [[112, 1, 1, "", "FuseNodeStartWithFusedInstanceNorm"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul": [[113, 1, 1, "", "FuseNodeStartWithMatmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling": [[114, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq": [[116, 1, 1, "", "OptimizeQDQGraph"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base": [[117, 1, 1, "", "QuantizeGraphBase"], [117, 1, 1, "", "QuantizeNodeBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn": [[118, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2": [[119, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv": [[120, 1, 1, "", "FuseNodeStartWithConv2d"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu": [[121, 1, 1, "", "QuantizeGraphForIntel"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul": [[122, 1, 1, "", 
"FuseNodeStartWithMatmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling": [[123, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph_common": [[124, 1, 1, "", "QuantizeGraphHelper"]], "neural_compressor.adaptor.tf_utils.smooth_quant_calibration": [[125, 1, 1, "", "SmoothQuantCalibration"], [125, 1, 1, "", "SmoothQuantCalibrationLLM"]], "neural_compressor.adaptor.tf_utils.smooth_quant_scaler": [[126, 1, 1, "", "SmoothQuantScaler"], [126, 1, 1, "", "SmoothQuantScalerLLM"]], "neural_compressor.adaptor.tf_utils.tf2onnx_converter": [[127, 1, 1, "", "TensorflowQDQToOnnxQDQConverter"]], "neural_compressor.adaptor.tf_utils.transform_graph": [[128, 0, 0, "-", "bias_correction"], [129, 0, 0, "-", "graph_transform_base"], [131, 0, 0, "-", "insert_logging"], [132, 0, 0, "-", "rerange_quantized_concat"]], "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction": [[128, 1, 1, "", "BiasCorrection"]], "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base": [[129, 1, 1, "", "GraphTransformBase"]], "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging": [[131, 1, 1, "", "InsertLogging"]], "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat": [[132, 1, 1, "", "RerangeQuantizedConcat"]], "neural_compressor.adaptor.tf_utils.util": [[133, 2, 1, "", "apply_inlining"], [133, 2, 1, "", "collate_tf_preds"], [133, 2, 1, "", "construct_function_from_graph_def"], [133, 2, 1, "", "disable_random"], [133, 2, 1, "", "fix_ref_type_of_graph_def"], [133, 2, 1, "", "generate_feed_dict"], [133, 2, 1, "", "get_estimator_graph"], [133, 2, 1, "", "get_graph_def"], [133, 2, 1, "", "get_input_output_node_names"], [133, 2, 1, "", "get_model_input_shape"], [133, 2, 1, "", "get_tensor_by_name"], [133, 2, 1, "", "get_tensor_val_from_graph_node"], [133, 2, 1, "", "get_weight_from_input_tensor"], [133, 2, 1, "", "int8_node_name_reverse"], [133, 2, 1, "", "is_ckpt_format"], [133, 2, 1, "", "is_saved_model_format"], [133, 2, 1, "", "iterator_sess_run"], [133, 2, 1, "", "parse_saved_model"], [133, 2, 1, "", "read_graph"], [133, 2, 1, "", "reconstruct_saved_model"], [133, 2, 1, "", "strip_equivalent_nodes"], [133, 2, 1, "", "strip_unused_nodes"], [133, 2, 1, "", "version1_eq_version2"], [133, 2, 1, "", "version1_gt_version2"], [133, 2, 1, "", "version1_gte_version2"], [133, 2, 1, "", "version1_lt_version2"], [133, 2, 1, "", "version1_lte_version2"], [133, 2, 1, "", "write_graph"]], "neural_compressor.adaptor.torch_utils": [[134, 0, 0, "-", "bf16_convert"], [135, 0, 0, "-", "hawq_metric"], [137, 0, 0, "-", "layer_wise_quant"], [142, 0, 0, "-", "model_wrapper"], [143, 0, 0, "-", "pattern_detector"], [144, 0, 0, "-", "symbolic_trace"], [145, 0, 0, "-", "util"]], "neural_compressor.adaptor.torch_utils.bf16_convert": [[134, 1, 1, "", "BF16ModuleWrapper"], [134, 2, 1, "", "Convert"]], "neural_compressor.adaptor.torch_utils.hawq_metric": [[135, 1, 1, "", "HessianTrace"], [135, 1, 1, "", "Node_collector"], [135, 2, 1, "", "compare_weights"], [135, 2, 1, "", "hawq_top"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant": [[138, 0, 0, "-", "modified_pickle"], [139, 0, 0, "-", "quantize"], [140, 0, 0, "-", "torch_load"], [141, 0, 0, "-", "utils"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle": [[138, 3, 1, "", "PickleError"], [138, 3, 1, "", "PicklingError"], [138, 3, 1, "", "UnpicklingError"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize": [[139, 1, 1, "", 
"LayerWiseQuant"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load": [[140, 2, 1, "", "load"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils": [[141, 2, 1, "", "dowload_hf_model"], [141, 2, 1, "", "get_children"], [141, 2, 1, "", "get_module"], [141, 2, 1, "", "get_named_children"], [141, 2, 1, "", "get_super_module_by_name"], [141, 2, 1, "", "load_empty_model"], [141, 2, 1, "", "load_layer_wise_quantized_model"], [141, 2, 1, "", "load_tensor"], [141, 2, 1, "", "load_tensor_from_shard"], [141, 2, 1, "", "update_module"]], "neural_compressor.adaptor.torch_utils.model_wrapper": [[142, 1, 1, "", "FakeAffineTensorQuantFunction"], [142, 1, 1, "", "MulLinear"], [142, 1, 1, "", "TEQLinearFakeQuant"]], "neural_compressor.adaptor.torch_utils.pattern_detector": [[143, 1, 1, "", "TransformerBasedModelBlockPatternDetector"]], "neural_compressor.adaptor.torch_utils.symbolic_trace": [[144, 2, 1, "", "symbolic_trace"], [144, 2, 1, "", "trace_and_fuse_sub_graph"]], "neural_compressor.adaptor.torch_utils.util": [[145, 2, 1, "", "append_attr"], [145, 2, 1, "", "auto_copy"], [145, 2, 1, "", "calculate_quant_min_max"], [145, 2, 1, "", "calibration"], [145, 2, 1, "", "check_cfg_and_qconfig"], [145, 2, 1, "", "collate_torch_preds"], [145, 2, 1, "", "collect_weight_info"], [145, 2, 1, "", "fetch_module"], [145, 2, 1, "", "forward_wrapper"], [145, 2, 1, "", "generate_activation_observer"], [145, 2, 1, "", "get_absorb_layers"], [145, 2, 1, "", "get_block_prefix"], [145, 2, 1, "", "get_depth"], [145, 2, 1, "", "get_dict_at_depth"], [145, 2, 1, "", "get_element_under_depth"], [145, 2, 1, "", "get_embedding_contiguous"], [145, 2, 1, "", "get_example_input"], [145, 2, 1, "", "get_fallback_order"], [145, 2, 1, "", "get_hidden_states"], [145, 2, 1, "", "get_module_input_output"], [145, 2, 1, "", "get_mse_order_per_fp32"], [145, 2, 1, "", "get_mse_order_per_int8"], [145, 2, 1, "", "get_op_type_by_name"], [145, 2, 1, "", "get_quantizable_ops_from_cfgs"], [145, 2, 1, "", "get_torch_version"], [145, 2, 1, "", "input2tuple"], [145, 2, 1, "", "is_fused_module"], [145, 2, 1, "", "match_datatype_pattern"], [145, 2, 1, "", "move_input_device"], [145, 2, 1, "", "paser_cfgs"], [145, 2, 1, "", "set_module"], [145, 2, 1, "", "simple_inference"], [145, 2, 1, "", "update_sq_scale"]], "neural_compressor.algorithm": [[146, 0, 0, "-", "algorithm"], [147, 0, 0, "-", "fast_bias_correction"], [149, 0, 0, "-", "smooth_quant"], [150, 0, 0, "-", "weight_correction"]], "neural_compressor.algorithm.algorithm": [[146, 1, 1, "", "ALGORITHMS"], [146, 1, 1, "", "Algorithm"], [146, 1, 1, "", "AlgorithmScheduler"], [146, 2, 1, "", "algorithm_registry"]], "neural_compressor.algorithm.fast_bias_correction": [[147, 1, 1, "", "FastBiasCorrection"]], "neural_compressor.algorithm.smooth_quant": [[149, 1, 1, "", "SmoothQuant"]], "neural_compressor.algorithm.weight_correction": [[150, 1, 1, "", "WeightCorrection"]], "neural_compressor.benchmark": [[151, 2, 1, "", "benchmark_with_raw_cmd"], [151, 2, 1, "", "call_one"], [151, 2, 1, "", "config_instance"], [151, 2, 1, "", "fit"], [151, 2, 1, "", "generate_prefix"], [151, 2, 1, "", "get_architecture"], [151, 2, 1, "", "get_bounded_threads"], [151, 2, 1, "", "get_core_ids"], [151, 2, 1, "", "get_physical_ids"], [151, 2, 1, "", "get_threads"], [151, 2, 1, "", "get_threads_per_core"], [151, 2, 1, "", "profile"], [151, 2, 1, "", "run_instance"], [151, 2, 1, "", "set_all_env_var"], [151, 2, 1, "", "set_env_var"], [151, 2, 1, "", "summary_benchmark"]], "neural_compressor.common": 
[[152, 0, 0, "-", "base_config"], [153, 0, 0, "-", "base_tuning"], [154, 0, 0, "-", "benchmark"], [156, 0, 0, "-", "tuning_param"], [158, 0, 0, "-", "utils"]], "neural_compressor.common.base_config": [[152, 1, 1, "", "BaseConfig"], [152, 1, 1, "", "ComposableConfig"], [152, 1, 1, "", "ConfigRegistry"], [152, 2, 1, "", "get_all_config_set_from_config_registry"], [152, 2, 1, "", "register_config"], [152, 2, 1, "", "register_supported_configs_for_fwk"]], "neural_compressor.common.base_config.BaseConfig": [[152, 4, 1, "", "name"], [152, 4, 1, "", "params_list"]], "neural_compressor.common.base_config.ComposableConfig": [[152, 4, 1, "", "config_list"]], "neural_compressor.common.base_tuning": [[153, 1, 1, "", "ConfigLoader"], [153, 1, 1, "", "ConfigSet"], [153, 1, 1, "", "EvaluationFuncWrapper"], [153, 1, 1, "", "Evaluator"], [153, 1, 1, "", "Sampler"], [153, 1, 1, "", "SequentialSampler"], [153, 1, 1, "", "TuningConfig"], [153, 1, 1, "", "TuningMonitor"], [153, 2, 1, "", "init_tuning"]], "neural_compressor.common.base_tuning.ConfigSet": [[153, 4, 1, "", "config_list"]], "neural_compressor.common.benchmark": [[154, 2, 1, "", "benchmark"], [154, 2, 1, "", "dump_numa_info"], [154, 2, 1, "", "format_list2str"], [154, 2, 1, "", "generate_prefix"], [154, 2, 1, "", "get_linux_numa_info"], [154, 2, 1, "", "get_numa_node"], [154, 2, 1, "", "get_reversed_numa_info"], [154, 2, 1, "", "get_windows_numa_info"], [154, 2, 1, "", "parse_str2list"], [154, 2, 1, "", "run_multi_instance_command"], [154, 2, 1, "", "set_cores_for_instance"], [154, 2, 1, "", "summary_latency_throughput"]], "neural_compressor.common.tuning_param": [[156, 1, 1, "", "ParamLevel"], [156, 1, 1, "", "TuningParam"]], "neural_compressor.common.tuning_param.ParamLevel": [[156, 4, 1, "", "MODEL_LEVEL"], [156, 4, 1, "", "OP_LEVEL"], [156, 4, 1, "", "OP_TYPE_LEVEL"]], "neural_compressor.common.utils": [[157, 0, 0, "-", "constants"], [159, 0, 0, "-", "logger"], [160, 0, 0, "-", "save_load"], [161, 0, 0, "-", "utility"]], "neural_compressor.common.utils.constants": [[157, 1, 1, "", "Mode"]], "neural_compressor.common.utils.logger": [[159, 1, 1, "", "Logger"], [159, 1, 1, "", "TuningLogger"]], "neural_compressor.common.utils.save_load": [[160, 2, 1, "", "load_config_mapping"], [160, 2, 1, "", "save_config_mapping"]], "neural_compressor.common.utils.utility": [[161, 1, 1, "", "CpuInfo"], [161, 1, 1, "", "LazyImport"], [161, 1, 1, "", "ProcessorType"], [161, 1, 1, "", "Statistics"], [161, 2, 1, "", "call_counter"], [161, 2, 1, "", "detect_processor_type_based_on_hw"], [161, 2, 1, "", "dump_elapsed_time"], [161, 2, 1, "", "get_workspace"], [161, 2, 1, "", "log_process"], [161, 2, 1, "", "set_random_seed"], [161, 2, 1, "", "set_resume_from"], [161, 2, 1, "", "set_tensorboard"], [161, 2, 1, "", "set_workspace"], [161, 2, 1, "", "singleton"]], "neural_compressor.compression": [[162, 0, 0, "-", "callbacks"], [164, 0, 0, "-", "distillation"], [167, 0, 0, "-", "hpo"], [170, 0, 0, "-", "pruner"]], "neural_compressor.compression.callbacks": [[162, 1, 1, "", "BaseCallbacks"], [162, 1, 1, "", "DistillationCallbacks"], [162, 1, 1, "", "PruningCallbacks"], [162, 1, 1, "", "QuantizationAwareTrainingCallbacks"]], "neural_compressor.compression.callbacks.DistillationCallbacks": [[162, 4, 1, "", "_epoch_ran"], [162, 4, 1, "", "best_model"], [162, 4, 1, "", "best_score"], [162, 4, 1, "", "eval_frequency"]], "neural_compressor.compression.distillation": [[163, 0, 0, "-", "criterions"], [165, 0, 0, "-", "optimizers"], [166, 0, 0, "-", "utility"]], 
"neural_compressor.compression.distillation.criterions": [[163, 1, 1, "", "Criterions"], [163, 1, 1, "", "IntermediateLayersKnowledgeDistillationLoss"], [163, 1, 1, "", "KnowledgeDistillationFramework"], [163, 1, 1, "", "KnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchCriterions"], [163, 1, 1, "", "PyTorchCrossEntropyLoss"], [163, 1, 1, "", "PyTorchIntermediateLayersKnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchIntermediateLayersKnowledgeDistillationLossWrapper"], [163, 1, 1, "", "PyTorchKnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchKnowledgeDistillationLossWrapper"], [163, 1, 1, "", "PyTorchSelfKnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchSelfKnowledgeDistillationLossWrapper"], [163, 1, 1, "", "SelfKnowledgeDistillationLoss"], [163, 1, 1, "", "TensorFlowCrossEntropyLoss"], [163, 1, 1, "", "TensorFlowSparseCategoricalCrossentropy"], [163, 1, 1, "", "TensorflowCriterions"], [163, 1, 1, "", "TensorflowKnowledgeDistillationLoss"], [163, 1, 1, "", "TensorflowKnowledgeDistillationLossExternal"], [163, 1, 1, "", "TensorflowKnowledgeDistillationLossWrapper"], [163, 2, 1, "", "criterion_registry"]], "neural_compressor.compression.distillation.optimizers": [[165, 1, 1, "", "Optimizers"], [165, 1, 1, "", "PyTorchOptimizers"], [165, 1, 1, "", "PyTorchSGD"], [165, 1, 1, "", "TensorFlowAdam"], [165, 1, 1, "", "TensorFlowAdamW"], [165, 1, 1, "", "TensorFlowSGD"], [165, 1, 1, "", "TensorflowOptimizers"], [165, 2, 1, "", "optimizer_registry"]], "neural_compressor.compression.distillation.utility": [[166, 2, 1, "", "get_activation"], [166, 2, 1, "", "record_output"]], "neural_compressor.compression.hpo": [[168, 0, 0, "-", "sa_optimizer"]], "neural_compressor.compression.pruner": [[169, 0, 0, "-", "criteria"], [172, 0, 0, "-", "model_slim"], [176, 0, 0, "-", "patterns"], [170, 2, 1, "", "prepare_pruning"], [183, 0, 0, "-", "pruners"], [188, 0, 0, "-", "pruning"], [189, 0, 0, "-", "regs"], [170, 2, 1, "", "save"], [190, 0, 0, "-", "schedulers"], [191, 0, 0, "-", "tf_criteria"], [192, 0, 0, "-", "utils"], [193, 0, 0, "-", "wanda"]], "neural_compressor.compression.pruner.criteria": [[169, 1, 1, "", "BlockMaskCriterion"], [169, 1, 1, "", "GradientCriterion"], [169, 1, 1, "", "MagnitudeCriterion"], [169, 1, 1, "", "PruningCriterion"], [169, 1, 1, "", "RetrainFreeCriterion"], [169, 1, 1, "", "SnipCriterion"], [169, 1, 1, "", "SnipMomentumCriterion"], [169, 2, 1, "", "get_criterion"], [169, 2, 1, "", "register_criterion"]], "neural_compressor.compression.pruner.criteria.BlockMaskCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.GradientCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.MagnitudeCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.PruningCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.RetrainFreeCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.SnipCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.SnipMomentumCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.model_slim": [[171, 0, 0, "-", "auto_slim"], [173, 0, 0, "-", "pattern_analyzer"], [174, 0, 0, "-", "weight_slim"]], "neural_compressor.compression.pruner.model_slim.auto_slim": [[171, 2, 1, "", "generate_ffn2_pruning_config"], [171, 2, 1, "", "generate_mha_pruning_config"], [171, 2, 1, "", "model_slim"], [171, 2, 1, "", "model_slim_ffn2"], [171, 2, 1, "", 
"model_slim_mha"], [171, 2, 1, "", "parse_auto_slim_config"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer": [[173, 1, 1, "", "ClassifierHeadSearcher"], [173, 1, 1, "", "ClassifierHeadSearcherTF"], [173, 1, 1, "", "JitBasicSearcher"], [173, 1, 1, "", "Linear2LinearSearcher"], [173, 1, 1, "", "RecipeSearcher"], [173, 1, 1, "", "SelfMHASearcher"], [173, 2, 1, "", "get_attributes"], [173, 2, 1, "", "get_common_module"], [173, 2, 1, "", "print_iterables"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "static_graph"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "static_graph"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "searching_results"], [173, 4, 1, "", "static_graph"], [173, 4, 1, "", "target_layers"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher": [[173, 4, 1, "", "current_pattern"], [173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "searching_results"], [173, 4, 1, "", "static_graph"], [173, 4, 1, "", "target_layers"], [173, 4, 1, "", "target_op_lut"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher": [[173, 4, 1, "", "model"], [173, 4, 1, "", "recipe"], [173, 4, 1, "", "searching_results"], [173, 4, 1, "", "targets"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "static_graph"]], "neural_compressor.compression.pruner.model_slim.weight_slim": [[174, 1, 1, "", "LinearCompression"], [174, 1, 1, "", "LinearCompressionIterator"], [174, 1, 1, "", "PostCompressionUtils"]], "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression": [[174, 4, 1, "", "device"], [174, 4, 1, "", "layer_1"], [174, 4, 1, "", "layer_2"]], "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompressionIterator": [[174, 4, 1, "", "linear_patterns"]], "neural_compressor.compression.pruner.patterns": [[175, 0, 0, "-", "base"], [176, 2, 1, "", "get_pattern"], [177, 0, 0, "-", "mha"], [178, 0, 0, "-", "ninm"], [179, 0, 0, "-", "nxm"]], "neural_compressor.compression.pruner.patterns.base": [[175, 1, 1, "", "BasePattern"], [175, 1, 1, "", "KerasBasePattern"], [175, 1, 1, "", "PytorchBasePattern"], [175, 2, 1, "", "register_pattern"]], "neural_compressor.compression.pruner.patterns.base.BasePattern": [[175, 4, 1, "", "config"], [175, 4, 1, "", "invalid_layers"], [175, 4, 1, "", "is_global"], [175, 4, 1, "", "keep_mask_layers"], [175, 4, 1, "", "max_sparsity_ratio_per_op"], [175, 4, 1, "", "min_sparsity_ratio_per_op"], [175, 4, 1, "", "modules"], [175, 4, 1, "", "pattern"], [175, 4, 1, "", "target_sparsity"]], "neural_compressor.compression.pruner.patterns.base.KerasBasePattern": [[175, 4, 1, "", "config"], [175, 4, 1, "", "invalid_layers"], [175, 4, 1, "", "is_global"], [175, 4, 1, "", "keep_mask_layers"], [175, 4, 1, "", "max_sparsity_ratio_per_op"], [175, 4, 1, "", "min_sparsity_ratio_per_op"], [175, 4, 1, "", 
"modules"], [175, 4, 1, "", "pattern"], [175, 4, 1, "", "target_sparsity"]], "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern": [[175, 4, 1, "", "config"], [175, 4, 1, "", "invalid_layers"], [175, 4, 1, "", "is_global"], [175, 4, 1, "", "keep_mask_layers"], [175, 4, 1, "", "max_sparsity_ratio_per_op"], [175, 4, 1, "", "min_sparsity_ratio_per_op"], [175, 4, 1, "", "modules"], [175, 4, 1, "", "pattern"], [175, 4, 1, "", "target_sparsity"]], "neural_compressor.compression.pruner.patterns.mha": [[177, 1, 1, "", "PatternMHA"]], "neural_compressor.compression.pruner.patterns.mha.PatternMHA": [[177, 4, 1, "", "M"], [177, 4, 1, "", "N"]], "neural_compressor.compression.pruner.patterns.ninm": [[178, 1, 1, "", "PytorchPatternNInM"]], "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM": [[178, 4, 1, "", "M"], [178, 4, 1, "", "N"]], "neural_compressor.compression.pruner.patterns.nxm": [[179, 1, 1, "", "KerasPatternNxM"], [179, 1, 1, "", "PytorchPatternNxM"]], "neural_compressor.compression.pruner.patterns.nxm.KerasPatternNxM": [[179, 4, 1, "", "block_size"]], "neural_compressor.compression.pruner.patterns.nxm.PytorchPatternNxM": [[179, 4, 1, "", "block_size"]], "neural_compressor.compression.pruner.pruners": [[180, 0, 0, "-", "base"], [181, 0, 0, "-", "basic"], [182, 0, 0, "-", "block_mask"], [183, 2, 1, "", "get_pruner"], [184, 0, 0, "-", "mha"], [183, 2, 1, "", "parse_valid_pruner_types"], [185, 0, 0, "-", "pattern_lock"], [186, 0, 0, "-", "progressive"], [187, 0, 0, "-", "retrain_free"]], "neural_compressor.compression.pruner.pruners.base": [[180, 1, 1, "", "BasePruner"], [180, 1, 1, "", "KerasBasePruner"], [180, 1, 1, "", "PytorchBasePruner"], [180, 2, 1, "", "register_pruner"]], "neural_compressor.compression.pruner.pruners.base.BasePruner": [[180, 4, 1, "", "config"], [180, 4, 1, "", "current_sparsity_ratio"], [180, 4, 1, "", "end_step"], [180, 4, 1, "", "global_step"], [180, 4, 1, "", "masks"], [180, 4, 1, "", "max_sparsity_ratio_per_op"], [180, 4, 1, "", "modules"], [180, 4, 1, "", "pattern"], [180, 4, 1, "", "pruning_frequency"], [180, 4, 1, "", "scheduler"], [180, 4, 1, "", "scores"], [180, 4, 1, "", "start_step"], [180, 4, 1, "", "target_sparsity_ratio"]], "neural_compressor.compression.pruner.pruners.base.KerasBasePruner": [[180, 4, 1, "", "config"], [180, 4, 1, "", "current_sparsity_ratio"], [180, 4, 1, "", "end_step"], [180, 4, 1, "", "global_step"], [180, 4, 1, "", "masks"], [180, 4, 1, "", "max_sparsity_ratio_per_op"], [180, 4, 1, "", "modules"], [180, 4, 1, "", "pattern"], [180, 4, 1, "", "pruning_frequency"], [180, 4, 1, "", "scheduler"], [180, 4, 1, "", "scores"], [180, 4, 1, "", "start_step"], [180, 4, 1, "", "target_sparsity_ratio"]], "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner": [[180, 4, 1, "", "config"], [180, 4, 1, "", "current_sparsity_ratio"], [180, 4, 1, "", "end_step"], [180, 4, 1, "", "global_step"], [180, 4, 1, "", "masks"], [180, 4, 1, "", "max_sparsity_ratio_per_op"], [180, 4, 1, "", "modules"], [180, 4, 1, "", "pattern"], [180, 4, 1, "", "pruning_frequency"], [180, 4, 1, "", "scheduler"], [180, 4, 1, "", "scores"], [180, 4, 1, "", "start_step"], [180, 4, 1, "", "target_sparsity_ratio"]], "neural_compressor.compression.pruner.pruners.basic": [[181, 1, 1, "", "KerasBasicPruner"], [181, 1, 1, "", "PytorchBasicPruner"]], "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner": [[181, 4, 1, "", "criterion"], [181, 4, 1, "", "pattern"], [181, 4, 1, "", "reg"], [181, 4, 1, "", "scheduler"]], 
"neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner": [[181, 4, 1, "", "criterion"], [181, 4, 1, "", "pattern"], [181, 4, 1, "", "reg"], [181, 4, 1, "", "scheduler"]], "neural_compressor.compression.pruner.pruners.block_mask": [[182, 1, 1, "", "PytorchBlockMaskPruner"]], "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner": [[182, 4, 1, "", "criterion"], [182, 4, 1, "", "pattern"], [182, 4, 1, "", "reg"], [182, 4, 1, "", "scheduler"]], "neural_compressor.compression.pruner.pruners.mha": [[184, 1, 1, "", "PythonMultiheadAttentionPruner"]], "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner": [[184, 4, 1, "", "head_masks"], [184, 4, 1, "", "linear_layers"], [184, 4, 1, "", "mha_compressions"], [184, 4, 1, "", "mha_scores"]], "neural_compressor.compression.pruner.pruners.pattern_lock": [[185, 1, 1, "", "PytorchPatternLockPruner"]], "neural_compressor.compression.pruner.pruners.progressive": [[186, 1, 1, "", "PytorchProgressivePruner"]], "neural_compressor.compression.pruner.pruners.retrain_free": [[187, 1, 1, "", "PytorchRetrainFreePruner"]], "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner": [[187, 4, 1, "", "criterion"], [187, 4, 1, "", "pattern"], [187, 4, 1, "", "reg"], [187, 4, 1, "", "scheduler"]], "neural_compressor.compression.pruner.pruning": [[188, 1, 1, "", "BasePruning"], [188, 1, 1, "", "BasicPruning"], [188, 1, 1, "", "RetrainFreePruning"], [188, 1, 1, "", "SparseGPTPruning"], [188, 2, 1, "", "register_pruning"]], "neural_compressor.compression.pruner.pruning.BasePruning": [[188, 4, 1, "", "config_file_path"], [188, 4, 1, "", "model"], [188, 4, 1, "", "pruner_info"], [188, 4, 1, "", "pruners"]], "neural_compressor.compression.pruner.pruning.BasicPruning": [[188, 4, 1, "", "config_file_path"], [188, 4, 1, "", "model"], [188, 4, 1, "", "pruner_info"], [188, 4, 1, "", "pruners"]], "neural_compressor.compression.pruner.pruning.RetrainFreePruning": [[188, 4, 1, "", "config_file_path"], [188, 4, 1, "", "model"], [188, 4, 1, "", "pruner_info"], [188, 4, 1, "", "pruners"]], "neural_compressor.compression.pruner.regs": [[189, 1, 1, "", "BaseReg"], [189, 1, 1, "", "GroupLasso"], [189, 2, 1, "", "get_reg"], [189, 2, 1, "", "get_reg_type"], [189, 2, 1, "", "register_reg"]], "neural_compressor.compression.pruner.regs.GroupLasso": [[189, 4, 1, "", "alpha"], [189, 4, 1, "", "reg_terms"]], "neural_compressor.compression.pruner.schedulers": [[190, 1, 1, "", "IterativeScheduler"], [190, 1, 1, "", "OneshotScheduler"], [190, 1, 1, "", "PruningScheduler"], [190, 2, 1, "", "get_scheduler"], [190, 2, 1, "", "register_scheduler"]], "neural_compressor.compression.pruner.schedulers.PruningScheduler": [[190, 4, 1, "", "config"]], "neural_compressor.compression.pruner.tf_criteria": [[191, 1, 1, "", "MagnitudeCriterion"], [191, 1, 1, "", "PruningCriterion"], [191, 2, 1, "", "get_tf_criterion"], [191, 2, 1, "", "register_criterion"]], "neural_compressor.compression.pruner.tf_criteria.MagnitudeCriterion": [[191, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.tf_criteria.PruningCriterion": [[191, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.utils": [[192, 2, 1, "", "check_config"], [192, 2, 1, "", "check_key_validity"], [192, 2, 1, "", "collect_layer_inputs"], [192, 2, 1, "", "get_layers"], [192, 2, 1, "", "get_sparsity_ratio"], [192, 2, 1, "", "get_sparsity_ratio_tf"], [192, 2, 1, "", "parse_last_linear"], [192, 2, 1, "", "parse_last_linear_tf"], [192, 2, 1, "", 
"parse_to_prune"], [192, 2, 1, "", "parse_to_prune_tf"], [192, 2, 1, "", "process_and_check_config"], [192, 2, 1, "", "process_config"], [192, 2, 1, "", "process_weight_config"], [192, 2, 1, "", "process_yaml_config"], [192, 2, 1, "", "reset_none_to_default"], [192, 2, 1, "", "update_params"]], "neural_compressor.compression.pruner.wanda": [[194, 0, 0, "-", "utils"]], "neural_compressor.compression.pruner.wanda.utils": [[194, 2, 1, "", "find_layers"]], "neural_compressor.config": [[195, 1, 1, "", "AccuracyCriterion"], [195, 1, 1, "", "BenchmarkConfig"], [195, 1, 1, "", "DistillationConfig"], [195, 1, 1, "", "DotDict"], [195, 1, 1, "", "ExportConfig"], [195, 1, 1, "", "HPOConfig"], [195, 1, 1, "", "IntermediateLayersKnowledgeDistillationLossConfig"], [195, 1, 1, "", "Keras"], [195, 1, 1, "", "KnowledgeDistillationLossConfig"], [195, 1, 1, "", "MXNet"], [195, 1, 1, "", "MixedPrecisionConfig"], [195, 1, 1, "", "NASConfig"], [195, 1, 1, "", "ONNX"], [195, 1, 1, "", "ONNXQlinear2QDQConfig"], [195, 1, 1, "", "Options"], [195, 1, 1, "", "PostTrainingQuantConfig"], [195, 1, 1, "", "PyTorch"], [195, 1, 1, "", "QuantizationAwareTrainingConfig"], [195, 1, 1, "", "SelfKnowledgeDistillationLossConfig"], [195, 1, 1, "", "TF2ONNXConfig"], [195, 1, 1, "", "TensorFlow"], [195, 1, 1, "", "Torch2ONNXConfig"], [195, 1, 1, "", "TuningCriterion"], [195, 1, 1, "", "WeightPruningConfig"]], "neural_compressor.contrib": [[197, 0, 0, "-", "strategy"]], "neural_compressor.contrib.strategy": [[198, 0, 0, "-", "sigopt"], [199, 0, 0, "-", "tpe"]], "neural_compressor.contrib.strategy.sigopt": [[198, 1, 1, "", "SigOptTuneStrategy"]], "neural_compressor.contrib.strategy.tpe": [[199, 1, 1, "", "TpeTuneStrategy"]], "neural_compressor.data": [[215, 0, 0, "-", "datasets"], [219, 0, 0, "-", "filters"], [222, 0, 0, "-", "transforms"]], "neural_compressor.data.dataloaders": [[200, 0, 0, "-", "base_dataloader"], [201, 0, 0, "-", "dataloader"], [202, 0, 0, "-", "default_dataloader"], [203, 0, 0, "-", "fetcher"], [204, 0, 0, "-", "mxnet_dataloader"], [205, 0, 0, "-", "onnxrt_dataloader"], [206, 0, 0, "-", "pytorch_dataloader"], [207, 0, 0, "-", "sampler"], [208, 0, 0, "-", "tensorflow_dataloader"]], "neural_compressor.data.dataloaders.base_dataloader": [[200, 1, 1, "", "BaseDataLoader"]], "neural_compressor.data.dataloaders.dataloader": [[201, 1, 1, "", "DataLoader"], [201, 2, 1, "", "check_dataloader"]], "neural_compressor.data.dataloaders.default_dataloader": [[202, 1, 1, "", "DefaultDataLoader"], [202, 2, 1, "", "default_collate"]], "neural_compressor.data.dataloaders.fetcher": [[203, 1, 1, "", "Fetcher"], [203, 1, 1, "", "IndexFetcher"], [203, 1, 1, "", "IterableFetcher"]], "neural_compressor.data.dataloaders.mxnet_dataloader": [[204, 1, 1, "", "MXNetDataLoader"]], "neural_compressor.data.dataloaders.onnxrt_dataloader": [[205, 1, 1, "", "ONNXRTBertDataLoader"], [205, 1, 1, "", "ONNXRTDataLoader"]], "neural_compressor.data.dataloaders.pytorch_dataloader": [[206, 1, 1, "", "PyTorchDataLoader"]], "neural_compressor.data.dataloaders.sampler": [[207, 1, 1, "", "BatchSampler"], [207, 1, 1, "", "IterableSampler"], [207, 1, 1, "", "Sampler"], [207, 1, 1, "", "SequentialSampler"]], "neural_compressor.data.dataloaders.tensorflow_dataloader": [[208, 1, 1, "", "TFDataDataLoader"], [208, 1, 1, "", "TensorflowBertDataLoader"], [208, 1, 1, "", "TensorflowDataLoader"], [208, 1, 1, "", "TensorflowModelZooBertDataLoader"]], "neural_compressor.data.datasets": [[209, 0, 0, "-", "bert_dataset"], [210, 0, 0, "-", "coco_dataset"], [211, 0, 0, "-", 
"dataset"], [212, 0, 0, "-", "dummy_dataset"], [213, 0, 0, "-", "dummy_dataset_v2"], [214, 0, 0, "-", "imagenet_dataset"], [216, 0, 0, "-", "style_transfer_dataset"]], "neural_compressor.data.datasets.bert_dataset": [[209, 1, 1, "", "InputFeatures"], [209, 1, 1, "", "ONNXRTBertDataset"], [209, 1, 1, "", "ParseDecodeBert"], [209, 1, 1, "", "PytorchBertDataset"], [209, 1, 1, "", "TensorflowBertDataset"], [209, 1, 1, "", "TensorflowModelZooBertDataset"], [209, 2, 1, "", "convert_examples_to_features"], [209, 2, 1, "", "load_and_cache_examples"]], "neural_compressor.data.datasets.coco_dataset": [[210, 1, 1, "", "COCONpy"], [210, 1, 1, "", "COCORaw"], [210, 1, 1, "", "COCORecordDataset"], [210, 1, 1, "", "ParseDecodeCoco"]], "neural_compressor.data.datasets.dataset": [[211, 1, 1, "", "CIFAR10"], [211, 1, 1, "", "CIFAR100"], [211, 1, 1, "", "Dataset"], [211, 1, 1, "", "Datasets"], [211, 1, 1, "", "FashionMNIST"], [211, 1, 1, "", "ImageFolder"], [211, 1, 1, "", "IterableDataset"], [211, 1, 1, "", "MNIST"], [211, 1, 1, "", "MXNetCIFAR10"], [211, 1, 1, "", "MXNetCIFAR100"], [211, 1, 1, "", "MXNetDatasets"], [211, 1, 1, "", "MXNetFashionMNIST"], [211, 1, 1, "", "MXNetImageFolder"], [211, 1, 1, "", "MXNetMNIST"], [211, 1, 1, "", "ONNXRTITDatasets"], [211, 1, 1, "", "ONNXRTQLDatasets"], [211, 1, 1, "", "PyTorchDatasets"], [211, 1, 1, "", "PytorchCIFAR10"], [211, 1, 1, "", "PytorchCIFAR100"], [211, 1, 1, "", "PytorchFashionMNIST"], [211, 1, 1, "", "PytorchMNIST"], [211, 1, 1, "", "PytorchMxnetWrapDataset"], [211, 1, 1, "", "PytorchMxnetWrapFunction"], [211, 1, 1, "", "Tensorflow"], [211, 1, 1, "", "TensorflowCIFAR10"], [211, 1, 1, "", "TensorflowCIFAR100"], [211, 1, 1, "", "TensorflowDatasets"], [211, 1, 1, "", "TensorflowFashionMNIST"], [211, 1, 1, "", "TensorflowImageRecord"], [211, 1, 1, "", "TensorflowMNIST"], [211, 1, 1, "", "TensorflowTFRecordDataset"], [211, 1, 1, "", "TensorflowVOCRecord"], [211, 2, 1, "", "calculate_md5"], [211, 2, 1, "", "check_integrity"], [211, 2, 1, "", "dataset_registry"], [211, 2, 1, "", "download_url"], [211, 5, 1, "", "framework_datasets"], [211, 2, 1, "", "gen_bar_updater"]], "neural_compressor.data.datasets.dummy_dataset": [[212, 1, 1, "", "DummyDataset"]], "neural_compressor.data.datasets.dummy_dataset_v2": [[213, 1, 1, "", "DummyDataset"], [213, 1, 1, "", "SparseDummyDataset"]], "neural_compressor.data.datasets.imagenet_dataset": [[214, 1, 1, "", "ImagenetRaw"], [214, 1, 1, "", "MXNetImagenetRaw"], [214, 1, 1, "", "ONNXRTImagenetDataset"], [214, 1, 1, "", "PytorchImagenetRaw"], [214, 1, 1, "", "TensorflowImagenetDataset"], [214, 1, 1, "", "TensorflowImagenetRaw"]], "neural_compressor.data.datasets.style_transfer_dataset": [[216, 1, 1, "", "StyleTransferDataset"]], "neural_compressor.data.filters": [[217, 0, 0, "-", "coco_filter"], [218, 0, 0, "-", "filter"]], "neural_compressor.data.filters.coco_filter": [[217, 1, 1, "", "LabelBalanceCOCORawFilter"], [217, 1, 1, "", "LabelBalanceCOCORecordFilter"]], "neural_compressor.data.filters.filter": [[218, 1, 1, "", "FILTERS"], [218, 1, 1, "", "Filter"], [218, 1, 1, "", "MXNetFilters"], [218, 1, 1, "", "ONNXRTITFilters"], [218, 1, 1, "", "ONNXRTQLFilters"], [218, 1, 1, "", "PyTorchFilters"], [218, 1, 1, "", "TensorflowFilters"], [218, 2, 1, "", "filter_registry"]], "neural_compressor.data.transforms": [[221, 0, 0, "-", "imagenet_transform"], [223, 0, 0, "-", "postprocess"], [224, 0, 0, "-", "tokenization"], [225, 0, 0, "-", "transform"]], "neural_compressor.data.transforms.imagenet_transform": [[221, 1, 1, "", 
"BilinearImagenetTransform"], [221, 1, 1, "", "LabelShift"], [221, 1, 1, "", "ONNXResizeCropImagenetTransform"], [221, 1, 1, "", "OnnxBilinearImagenetTransform"], [221, 1, 1, "", "ParseDecodeImagenet"], [221, 1, 1, "", "ParseDecodeImagenetTransform"], [221, 1, 1, "", "QuantizedInput"], [221, 1, 1, "", "ResizeWithAspectRatio"], [221, 1, 1, "", "TensorflowResizeCropImagenetTransform"], [221, 1, 1, "", "TensorflowShiftRescale"], [221, 1, 1, "", "TensorflowTransposeLastChannel"]], "neural_compressor.data.transforms.postprocess": [[223, 1, 1, "", "Postprocess"]], "neural_compressor.data.transforms.tokenization": [[224, 1, 1, "", "BasicTokenizer"], [224, 1, 1, "", "FullTokenizer"], [224, 1, 1, "", "WordpieceTokenizer"], [224, 2, 1, "", "convert_by_vocab"], [224, 2, 1, "", "convert_to_unicode"], [224, 2, 1, "", "load_vocab"], [224, 2, 1, "", "whitespace_tokenize"]], "neural_compressor.data.transforms.transform": [[225, 1, 1, "", "AlignImageChannelTransform"], [225, 1, 1, "", "BaseTransform"], [225, 1, 1, "", "CastONNXTransform"], [225, 1, 1, "", "CastPyTorchTransform"], [225, 1, 1, "", "CastTFTransform"], [225, 1, 1, "", "CenterCropTFTransform"], [225, 1, 1, "", "CenterCropTransform"], [225, 1, 1, "", "CollectTransform"], [225, 1, 1, "", "ComposeTransform"], [225, 1, 1, "", "CropResizeTFTransform"], [225, 1, 1, "", "CropResizeTransform"], [225, 1, 1, "", "CropToBoundingBox"], [225, 1, 1, "", "InputFeatures"], [225, 1, 1, "", "MXNetCropResizeTransform"], [225, 1, 1, "", "MXNetCropToBoundingBox"], [225, 1, 1, "", "MXNetNormalizeTransform"], [225, 1, 1, "", "MXNetTransforms"], [225, 1, 1, "", "MXNetTranspose"], [225, 1, 1, "", "NormalizeTFTransform"], [225, 1, 1, "", "NormalizeTransform"], [225, 1, 1, "", "ONNXRTCropToBoundingBox"], [225, 1, 1, "", "ONNXRTITTransforms"], [225, 1, 1, "", "ONNXRTQLTransforms"], [225, 1, 1, "", "PaddedCenterCropTransform"], [225, 1, 1, "", "ParseDecodeVocTransform"], [225, 1, 1, "", "PyTorchAlignImageChannel"], [225, 1, 1, "", "PyTorchCropResizeTransform"], [225, 1, 1, "", "PyTorchNormalizeTransform"], [225, 1, 1, "", "PyTorchTransforms"], [225, 1, 1, "", "PyTorchTranspose"], [225, 1, 1, "", "PytorchMxnetTransform"], [225, 1, 1, "", "PytorchMxnetWrapFunction"], [225, 1, 1, "", "RandomCropTFTransform"], [225, 1, 1, "", "RandomCropTransform"], [225, 1, 1, "", "RandomHorizontalFlip"], [225, 1, 1, "", "RandomResizedCropMXNetTransform"], [225, 1, 1, "", "RandomResizedCropPytorchTransform"], [225, 1, 1, "", "RandomResizedCropTFTransform"], [225, 1, 1, "", "RandomResizedCropTransform"], [225, 1, 1, "", "RandomVerticalFlip"], [225, 1, 1, "", "RescaleKerasPretrainTransform"], [225, 1, 1, "", "RescaleTFTransform"], [225, 1, 1, "", "RescaleTransform"], [225, 1, 1, "", "ResizeMXNetTransform"], [225, 1, 1, "", "ResizePytorchTransform"], [225, 1, 1, "", "ResizeTFTransform"], [225, 1, 1, "", "ResizeTransform"], [225, 1, 1, "", "ResizeWithRatio"], [225, 1, 1, "", "SquadExample"], [225, 1, 1, "", "TFModelZooCollectTransform"], [225, 1, 1, "", "TFSquadV1ModelZooPostTransform"], [225, 1, 1, "", "TFSquadV1PostTransform"], [225, 1, 1, "", "TRANSFORMS"], [225, 1, 1, "", "TensorflowCropToBoundingBox"], [225, 1, 1, "", "TensorflowRandomHorizontalFlip"], [225, 1, 1, "", "TensorflowRandomVerticalFlip"], [225, 1, 1, "", "TensorflowResizeWithRatio"], [225, 1, 1, "", "TensorflowTransform"], [225, 1, 1, "", "TensorflowTransforms"], [225, 1, 1, "", "TensorflowTranspose"], [225, 1, 1, "", "TensorflowWrapFunction"], [225, 1, 1, "", "ToArray"], [225, 1, 1, "", "ToNDArrayTransform"], [225, 1, 1, "", 
"Transforms"], [225, 1, 1, "", "Transpose"], [225, 2, 1, "", "convert_examples_to_features"], [225, 2, 1, "", "get_final_text"], [225, 2, 1, "", "get_torchvision_map"], [225, 2, 1, "", "read_squad_examples"], [225, 2, 1, "", "transform_registry"]], "neural_compressor.metric": [[227, 0, 0, "-", "bleu"], [228, 0, 0, "-", "bleu_util"], [229, 0, 0, "-", "coco_label_map"], [230, 0, 0, "-", "coco_tools"], [231, 0, 0, "-", "evaluate_squad"], [232, 0, 0, "-", "f1"], [234, 0, 0, "-", "metric"]], "neural_compressor.metric.bleu": [[227, 1, 1, "", "BLEU"], [227, 1, 1, "", "UnicodeRegex"], [227, 2, 1, "", "bleu_tokenize"]], "neural_compressor.metric.bleu.BLEU": [[227, 4, 1, "", "labels"], [227, 4, 1, "", "predictions"]], "neural_compressor.metric.bleu.UnicodeRegex": [[227, 4, 1, "", "nondigit_punct_re"], [227, 4, 1, "", "punct_nondigit_re"], [227, 4, 1, "", "symbol_re"]], "neural_compressor.metric.bleu_util": [[228, 2, 1, "", "compute_bleu"]], "neural_compressor.metric.coco_tools": [[230, 1, 1, "", "COCOEvalWrapper"], [230, 1, 1, "", "COCOWrapper"], [230, 2, 1, "", "ExportSingleImageDetectionBoxesToCoco"], [230, 2, 1, "", "ExportSingleImageDetectionMasksToCoco"], [230, 2, 1, "", "ExportSingleImageGroundtruthToCoco"]], "neural_compressor.metric.coco_tools.COCOWrapper": [[230, 4, 1, "", "dataset"], [230, 4, 1, "", "detection_type"]], "neural_compressor.metric.evaluate_squad": [[231, 2, 1, "", "evaluate"], [231, 2, 1, "", "exact_match_score"], [231, 2, 1, "", "f1_score"], [231, 2, 1, "", "metric_max_over_ground_truths"]], "neural_compressor.metric.f1": [[232, 2, 1, "", "evaluate"], [232, 2, 1, "", "f1_score"], [232, 2, 1, "", "metric_max_over_ground_truths"], [232, 2, 1, "", "normalize_answer"]], "neural_compressor.metric.metric": [[234, 1, 1, "", "Accuracy"], [234, 1, 1, "", "BaseMetric"], [234, 1, 1, "", "COCOmAPv2"], [234, 1, 1, "", "F1"], [234, 1, 1, "", "GeneralTopK"], [234, 1, 1, "", "Loss"], [234, 1, 1, "", "MAE"], [234, 1, 1, "", "METRICS"], [234, 1, 1, "", "MSE"], [234, 1, 1, "", "MXNetMetrics"], [234, 1, 1, "", "Metric"], [234, 1, 1, "", "ONNXRTGLUE"], [234, 1, 1, "", "ONNXRTITMetrics"], [234, 1, 1, "", "ONNXRTQLMetrics"], [234, 1, 1, "", "PyTorchLoss"], [234, 1, 1, "", "PyTorchMetrics"], [234, 1, 1, "", "RMSE"], [234, 1, 1, "", "ROC"], [234, 1, 1, "", "SquadF1"], [234, 1, 1, "", "TensorflowCOCOMAP"], [234, 1, 1, "", "TensorflowMAP"], [234, 1, 1, "", "TensorflowMetrics"], [234, 1, 1, "", "TensorflowTopK"], [234, 1, 1, "", "TensorflowVOCMAP"], [234, 1, 1, "", "WrapMXNetMetric"], [234, 1, 1, "", "WrapONNXRTMetric"], [234, 1, 1, "", "WrapPyTorchMetric"], [234, 1, 1, "", "mIOU"], [234, 2, 1, "", "metric_registry"], [234, 2, 1, "", "register_customer_metric"]], "neural_compressor.metric.metric.Accuracy": [[234, 4, 1, "", "label_list"], [234, 4, 1, "", "pred_list"], [234, 4, 1, "", "sample"]], "neural_compressor.metric.metric.GeneralTopK": [[234, 4, 1, "", "k"], [234, 4, 1, "", "num_correct"], [234, 4, 1, "", "num_sample"]], "neural_compressor.metric.metric.Loss": [[234, 4, 1, "", "sample"], [234, 4, 1, "", "sum"]], "neural_compressor.metric.metric.MAE": [[234, 4, 1, "", "compare_label"], [234, 4, 1, "", "label_list"], [234, 4, 1, "", "pred_list"]], "neural_compressor.metric.metric.METRICS": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.MSE": [[234, 4, 1, "", "compare_label"], [234, 4, 1, "", "label_list"], [234, 4, 1, "", "pred_list"]], "neural_compressor.metric.metric.MXNetMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.ONNXRTITMetrics": [[234, 4, 1, "", 
"metrics"]], "neural_compressor.metric.metric.ONNXRTQLMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.PyTorchMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.RMSE": [[234, 4, 1, "", "mse"]], "neural_compressor.metric.metric.TensorflowMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.TensorflowTopK": [[234, 4, 1, "", "k"], [234, 4, 1, "", "num_correct"], [234, 4, 1, "", "num_sample"]], "neural_compressor.mix_precision": [[235, 2, 1, "", "fit"]], "neural_compressor.model": [[236, 0, 0, "-", "base_model"], [238, 0, 0, "-", "keras_model"], [239, 0, 0, "-", "model"], [240, 0, 0, "-", "mxnet_model"], [241, 0, 0, "-", "nets_factory"], [242, 0, 0, "-", "onnx_model"], [243, 0, 0, "-", "tensorflow_model"], [244, 0, 0, "-", "torch_model"]], "neural_compressor.model.base_model": [[236, 1, 1, "", "BaseModel"]], "neural_compressor.model.keras_model": [[238, 1, 1, "", "KerasModel"]], "neural_compressor.model.model": [[239, 1, 1, "", "Model"], [239, 2, 1, "", "get_model_fwk_name"]], "neural_compressor.model.mxnet_model": [[240, 1, 1, "", "MXNetModel"]], "neural_compressor.model.nets_factory": [[241, 1, 1, "", "TFSlimNetsFactory"]], "neural_compressor.model.onnx_model": [[242, 1, 1, "", "ONNXModel"]], "neural_compressor.model.tensorflow_model": [[243, 1, 1, "", "TensorflowBaseModel"], [243, 1, 1, "", "TensorflowCheckpointModel"], [243, 1, 1, "", "TensorflowLLMModel"], [243, 1, 1, "", "TensorflowModel"], [243, 1, 1, "", "TensorflowQATModel"], [243, 1, 1, "", "TensorflowSavedModelModel"], [243, 2, 1, "", "checkpoint_session"], [243, 2, 1, "", "estimator_session"], [243, 2, 1, "", "frozen_pb_session"], [243, 2, 1, "", "get_model_type"], [243, 2, 1, "", "graph_def_session"], [243, 2, 1, "", "graph_session"], [243, 2, 1, "", "keras_session"], [243, 2, 1, "", "load_saved_model"], [243, 2, 1, "", "saved_model_session"], [243, 2, 1, "", "slim_session"], [243, 2, 1, "", "try_loading_keras"], [243, 2, 1, "", "validate_and_inference_input_output"], [243, 2, 1, "", "validate_graph_node"]], "neural_compressor.model.torch_model": [[244, 1, 1, "", "IPEXModel"], [244, 1, 1, "", "PyTorchBaseModel"], [244, 1, 1, "", "PyTorchFXModel"], [244, 1, 1, "", "PyTorchModel"]], "neural_compressor.objective": [[245, 1, 1, "", "Accuracy"], [245, 1, 1, "", "Footprint"], [245, 1, 1, "", "ModelSize"], [245, 1, 1, "", "MultiObjective"], [245, 1, 1, "", "Objective"], [245, 1, 1, "", "Performance"], [245, 2, 1, "", "objective_custom_registry"], [245, 2, 1, "", "objective_registry"]], "neural_compressor.profiling.parser": [[247, 0, 0, "-", "factory"], [250, 0, 0, "-", "parser"], [251, 0, 0, "-", "result"]], "neural_compressor.profiling.parser.factory": [[247, 1, 1, "", "ParserFactory"]], "neural_compressor.profiling.parser.onnx_parser": [[248, 0, 0, "-", "factory"], [249, 0, 0, "-", "parser"]], "neural_compressor.profiling.parser.onnx_parser.factory": [[248, 1, 1, "", "OnnxrtParserFactory"]], "neural_compressor.profiling.parser.onnx_parser.parser": [[249, 1, 1, "", "OnnxProfilingParser"]], "neural_compressor.profiling.parser.parser": [[250, 1, 1, "", "ProfilingParser"]], "neural_compressor.profiling.parser.result": [[251, 1, 1, "", "ProfilingResult"]], "neural_compressor.profiling.parser.tensorflow_parser": [[252, 0, 0, "-", "factory"], [253, 0, 0, "-", "parser"]], "neural_compressor.profiling.parser.tensorflow_parser.factory": [[252, 1, 1, "", "TensorFlowParserFactory"]], "neural_compressor.profiling.parser.tensorflow_parser.parser": [[253, 1, 1, "", 
"TensorFlowProfilingParser"]], "neural_compressor.profiling.profiler": [[254, 0, 0, "-", "factory"], [258, 0, 0, "-", "profiler"]], "neural_compressor.profiling.profiler.factory": [[254, 1, 1, "", "ProfilerFactory"]], "neural_compressor.profiling.profiler.onnxrt_profiler": [[255, 0, 0, "-", "factory"], [256, 0, 0, "-", "profiler"], [257, 0, 0, "-", "utils"]], "neural_compressor.profiling.profiler.onnxrt_profiler.factory": [[255, 1, 1, "", "ProfilerFactory"]], "neural_compressor.profiling.profiler.onnxrt_profiler.profiler": [[256, 1, 1, "", "Profiler"]], "neural_compressor.profiling.profiler.onnxrt_profiler.utils": [[257, 2, 1, "", "create_onnx_config"]], "neural_compressor.profiling.profiler.profiler": [[258, 1, 1, "", "Profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler": [[259, 0, 0, "-", "factory"], [260, 0, 0, "-", "profiler"], [261, 0, 0, "-", "utils"]], "neural_compressor.profiling.profiler.tensorflow_profiler.factory": [[259, 1, 1, "", "ProfilerFactory"]], "neural_compressor.profiling.profiler.tensorflow_profiler.profiler": [[260, 1, 1, "", "Profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler.utils": [[261, 2, 1, "", "create_tf_config"], [261, 2, 1, "", "delete_assign"], [261, 2, 1, "", "set_eager_execution"]], "neural_compressor.quantization": [[262, 2, 1, "", "fit"]], "neural_compressor.strategy": [[263, 0, 0, "-", "auto"], [264, 0, 0, "-", "auto_mixed_precision"], [265, 0, 0, "-", "basic"], [266, 0, 0, "-", "bayesian"], [267, 0, 0, "-", "conservative"], [268, 0, 0, "-", "exhaustive"], [269, 0, 0, "-", "hawq_v2"], [271, 0, 0, "-", "mse"], [272, 0, 0, "-", "mse_v2"], [273, 0, 0, "-", "random"], [274, 0, 0, "-", "strategy"], [276, 0, 0, "-", "utils"]], "neural_compressor.strategy.auto": [[263, 1, 1, "", "AutoTuneStrategy"]], "neural_compressor.strategy.auto_mixed_precision": [[264, 1, 1, "", "AutoMixedPrecisionTuneStrategy"]], "neural_compressor.strategy.basic": [[265, 1, 1, "", "BasicTuneStrategy"]], "neural_compressor.strategy.bayesian": [[266, 1, 1, "", "BayesianOptimization"], [266, 1, 1, "", "BayesianTuneStrategy"], [266, 1, 1, "", "TargetSpace"], [266, 2, 1, "", "acq_max"]], "neural_compressor.strategy.conservative": [[267, 1, 1, "", "ConservativeTuneStrategy"]], "neural_compressor.strategy.exhaustive": [[268, 1, 1, "", "ExhaustiveTuneStrategy"]], "neural_compressor.strategy.hawq_v2": [[269, 1, 1, "", "HAWQ_V2TuneStrategy"]], "neural_compressor.strategy.mse": [[271, 1, 1, "", "MSETuneStrategy"]], "neural_compressor.strategy.mse_v2": [[272, 1, 1, "", "MSE_V2TuneStrategy"]], "neural_compressor.strategy.random": [[273, 1, 1, "", "RandomTuneStrategy"]], "neural_compressor.strategy.strategy": [[274, 1, 1, "", "TuneStrategy"], [274, 1, 1, "", "TuneStrategyMeta"], [274, 2, 1, "", "strategy_registry"]], "neural_compressor.strategy.utils": [[275, 0, 0, "-", "constant"], [277, 0, 0, "-", "tuning_sampler"], [278, 0, 0, "-", "tuning_space"], [279, 0, 0, "-", "tuning_structs"], [280, 0, 0, "-", "utility"]], "neural_compressor.strategy.utils.tuning_sampler": [[277, 1, 1, "", "BlockFallbackTuningSampler"], [277, 1, 1, "", "FallbackTuningSampler"], [277, 1, 1, "", "LowerBitsSampler"], [277, 1, 1, "", "ModelWiseTuningSampler"], [277, 1, 1, "", "OpTypeWiseTuningSampler"], [277, 1, 1, "", "OpWiseTuningSampler"], [277, 1, 1, "", "SmoothQuantSampler"], [277, 1, 1, "", "TuningOrder"], [277, 1, 1, "", "TuningSampler"], [277, 1, 1, "", "WeightOnlyQuantSampler"]], "neural_compressor.strategy.utils.tuning_space": [[278, 1, 1, "", "TuningItem"], [278, 1, 1, "", 
"TuningSpace"], [278, 2, 1, "", "initial_tuning_cfg_with_quant_mode"], [278, 2, 1, "", "pattern_to_internal"], [278, 2, 1, "", "pattern_to_path"], [278, 2, 1, "", "quant_mode_from_pattern"]], "neural_compressor.strategy.utils.tuning_structs": [[279, 1, 1, "", "OpTuningConfig"]], "neural_compressor.strategy.utils.utility": [[280, 1, 1, "", "ClassRegister"], [280, 1, 1, "", "OrderedDefaultDict"], [280, 1, 1, "", "QuantOptions"], [280, 1, 1, "", "QuantType"], [280, 2, 1, "", "build_slave_faker_model"], [280, 2, 1, "", "extract_data_type"], [280, 2, 1, "", "get_adaptor_name"], [280, 2, 1, "", "preprocess_user_cfg"], [280, 2, 1, "", "reverted_data_type"]], "neural_compressor.template": [[281, 0, 0, "-", "api_doc_example"]], "neural_compressor.template.api_doc_example": [[281, 1, 1, "", "ExampleClass"], [281, 4, 1, "", "attribute1"], [281, 2, 1, "", "function1"], [281, 2, 1, "", "function2"], [281, 2, 1, "", "function3"], [281, 2, 1, "", "generator1"], [281, 5, 1, "", "module_debug_level1"]], "neural_compressor.template.api_doc_example.ExampleClass": [[281, 4, 1, "", "attr1"], [281, 4, 1, "", "attr2"], [281, 4, 1, "", "attr5"]], "neural_compressor.tensorflow": [[282, 0, 0, "-", "algorithms"], [291, 0, 0, "-", "keras"], [304, 0, 0, "-", "quantization"], [388, 0, 0, "-", "utils"]], "neural_compressor.tensorflow.algorithms": [[285, 0, 0, "-", "smoother"], [287, 0, 0, "-", "static_quant"]], "neural_compressor.tensorflow.algorithms.smoother": [[283, 0, 0, "-", "calibration"], [284, 0, 0, "-", "core"], [286, 0, 0, "-", "scaler"]], "neural_compressor.tensorflow.algorithms.smoother.calibration": [[283, 1, 1, "", "SmoothQuantCalibration"], [283, 1, 1, "", "SmoothQuantCalibrationLLM"]], "neural_compressor.tensorflow.algorithms.smoother.core": [[284, 1, 1, "", "SmoothQuant"]], "neural_compressor.tensorflow.algorithms.smoother.scaler": [[286, 1, 1, "", "SmoothQuantScaler"], [286, 1, 1, "", "SmoothQuantScalerLLM"]], "neural_compressor.tensorflow.algorithms.static_quant": [[288, 0, 0, "-", "keras"], [289, 0, 0, "-", "tensorflow"]], "neural_compressor.tensorflow.algorithms.static_quant.keras": [[288, 1, 1, "", "KerasAdaptor"], [288, 1, 1, "", "KerasConfigConverter"], [288, 1, 1, "", "KerasQuery"], [288, 1, 1, "", "KerasSurgery"]], "neural_compressor.tensorflow.algorithms.static_quant.tensorflow": [[289, 1, 1, "", "TensorFlowAdaptor"], [289, 1, 1, "", "TensorFlowConfig"], [289, 1, 1, "", "TensorflowConfigConverter"], [289, 1, 1, "", "TensorflowQuery"], [289, 1, 1, "", "Tensorflow_ITEXAdaptor"]], "neural_compressor.tensorflow.keras": [[295, 0, 0, "-", "layers"], [300, 0, 0, "-", "quantization"]], "neural_compressor.tensorflow.keras.layers": [[292, 0, 0, "-", "conv2d"], [293, 0, 0, "-", "dense"], [294, 0, 0, "-", "depthwise_conv2d"], [296, 0, 0, "-", "layer_initializer"], [297, 0, 0, "-", "pool2d"], [298, 0, 0, "-", "separable_conv2d"]], "neural_compressor.tensorflow.keras.layers.conv2d": [[292, 1, 1, "", "QConv2D"], [292, 2, 1, "", "initialize_int8_conv2d"]], "neural_compressor.tensorflow.keras.layers.dense": [[293, 1, 1, "", "QDense"], [293, 2, 1, "", "initialize_int8_dense"]], "neural_compressor.tensorflow.keras.layers.depthwise_conv2d": [[294, 1, 1, "", "QDepthwiseConv2D"], [294, 2, 1, "", "initialize_int8_depthwise_conv2d"]], "neural_compressor.tensorflow.keras.layers.pool2d": [[297, 1, 1, "", "QAvgPool2D"], [297, 1, 1, "", "QMaxPool2D"], [297, 2, 1, "", "initialize_int8_avgpool"], [297, 2, 1, "", "initialize_int8_maxpool"]], "neural_compressor.tensorflow.keras.layers.separable_conv2d": [[298, 1, 1, "", 
"QSeparableConv2D"], [298, 2, 1, "", "initialize_int8_separable_conv2d"]], "neural_compressor.tensorflow.keras.quantization": [[299, 0, 0, "-", "config"]], "neural_compressor.tensorflow.keras.quantization.config": [[299, 1, 1, "", "OperatorConfig"], [299, 1, 1, "", "StaticQuantConfig"], [299, 2, 1, "", "get_all_registered_configs"], [299, 2, 1, "", "get_default_static_quant_config"]], "neural_compressor.tensorflow.quantization": [[301, 0, 0, "-", "algorithm_entry"], [302, 0, 0, "-", "autotune"], [303, 0, 0, "-", "config"], [305, 0, 0, "-", "quantize"], [361, 0, 0, "-", "utils"]], "neural_compressor.tensorflow.quantization.algorithm_entry": [[301, 2, 1, "", "smooth_quant_entry"], [301, 2, 1, "", "static_quant_entry"]], "neural_compressor.tensorflow.quantization.autotune": [[302, 2, 1, "", "autotune"], [302, 2, 1, "", "get_all_config_set"]], "neural_compressor.tensorflow.quantization.config": [[303, 1, 1, "", "SmoothQuantConfig"], [303, 1, 1, "", "StaticQuantConfig"], [303, 2, 1, "", "get_default_sq_config"], [303, 2, 1, "", "get_default_static_quant_config"]], "neural_compressor.tensorflow.quantization.quantize": [[305, 2, 1, "", "need_apply"], [305, 2, 1, "", "quantize_model"], [305, 2, 1, "", "quantize_model_with_single_config"]], "neural_compressor.tensorflow.quantization.utils": [[306, 0, 0, "-", "graph_converter"], [344, 0, 0, "-", "graph_rewriter"], [360, 0, 0, "-", "graph_util"], [362, 0, 0, "-", "quantize_graph"], [379, 0, 0, "-", "quantize_graph_common"], [382, 0, 0, "-", "transform_graph"], [385, 0, 0, "-", "utility"]], "neural_compressor.tensorflow.quantization.utils.graph_converter": [[306, 1, 1, "", "GraphConverter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter": [[309, 0, 0, "-", "bf16"], [333, 0, 0, "-", "generic"], [343, 0, 0, "-", "graph_base"], [351, 0, 0, "-", "int8"], [356, 0, 0, "-", "qdq"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16": [[307, 0, 0, "-", "bf16_convert"], [308, 0, 0, "-", "dequantize_cast_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert": [[307, 1, 1, "", "BF16Convert"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[308, 1, 1, "", "DequantizeCastOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic": [[310, 0, 0, "-", "convert_add_to_biasadd"], [311, 0, 0, "-", "convert_layout"], [312, 0, 0, "-", "convert_leakyrelu"], [313, 0, 0, "-", "convert_nan_to_random"], [314, 0, 0, "-", "convert_placeholder_to_const"], [315, 0, 0, "-", "dilated_contraction"], [316, 0, 0, "-", "dummy_biasadd"], [317, 0, 0, "-", "expanddims_optimizer"], [318, 0, 0, "-", "fetch_weight_from_reshape"], [319, 0, 0, "-", "fold_batch_norm"], [320, 0, 0, "-", "fold_constant"], [321, 0, 0, "-", "fuse_biasadd_add"], [322, 0, 0, "-", "fuse_column_wise_mul"], [323, 0, 0, "-", "fuse_conv_with_math"], [324, 0, 0, "-", "fuse_decomposed_bn"], [325, 0, 0, "-", "fuse_decomposed_in"], [326, 0, 0, "-", "fuse_gelu"], [327, 0, 0, "-", "fuse_layer_norm"], [328, 0, 0, "-", "fuse_pad_with_conv"], [329, 0, 0, "-", "fuse_pad_with_fp32_conv"], [330, 0, 0, "-", "fuse_reshape_transpose"], [331, 0, 0, "-", "graph_cse_optimizer"], [332, 0, 0, "-", "grappler_pass"], [334, 0, 0, "-", "insert_print_node"], [335, 0, 0, "-", "move_squeeze_after_relu"], [336, 0, 0, "-", "pre_optimize"], [337, 0, 0, "-", "remove_training_nodes"], [338, 0, 0, "-", "rename_batch_norm"], [339, 0, 0, "-", "split_shared_input"], [340, 0, 0, "-", 
"strip_equivalent_nodes"], [341, 0, 0, "-", "strip_unused_nodes"], [342, 0, 0, "-", "switch_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd": [[310, 1, 1, "", "ConvertAddToBiasAddOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout": [[311, 1, 1, "", "ConvertLayoutOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu": [[312, 1, 1, "", "ConvertLeakyReluOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random": [[313, 1, 1, "", "ConvertNanToRandom"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const": [[314, 1, 1, "", "ConvertPlaceholderToConst"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction": [[315, 1, 1, "", "DilatedContraction"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd": [[316, 1, 1, "", "InjectDummyBiasAddOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer": [[317, 1, 1, "", "ExpandDimsOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape": [[318, 1, 1, "", "FetchWeightFromReshapeOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm": [[319, 1, 1, "", "FoldBatchNormNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant": [[320, 1, 1, "", "GraphFoldConstantOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add": [[321, 1, 1, "", "FuseBiasAddAndAddOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul": [[322, 1, 1, "", "FuseColumnWiseMulOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math": [[323, 1, 1, "", "FuseConvWithMathOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn": [[324, 1, 1, "", "FuseDecomposedBNOptimizer"], [324, 2, 1, "", "bypass_reshape"], [324, 2, 1, "", "get_const_dim_count"], [324, 2, 1, "", "node_from_map"], [324, 2, 1, "", "node_name_from_input"], [324, 2, 1, "", "valid_reshape_inputs"], [324, 2, 1, "", "values_from_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in": [[325, 1, 1, "", "FuseDecomposedINOptimizer"], [325, 2, 1, "", "bypass_reshape"], [325, 2, 1, "", "get_const_dim_count"], [325, 2, 1, "", "node_from_map"], [325, 2, 1, "", "node_name_from_input"], [325, 2, 1, "", "valid_reshape_inputs"], [325, 2, 1, "", "values_from_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu": [[326, 1, 1, "", "FuseGeluOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm": [[327, 1, 1, "", "FuseLayerNormOptimizer"], [327, 2, 1, "", "node_from_map"], [327, 2, 1, "", "node_name_from_input"], [327, 2, 1, "", "values_from_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv": [[328, 1, 1, "", "FusePadWithConv2DOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[329, 1, 1, "", "FusePadWithFP32Conv2DOptimizer"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose": [[330, 1, 1, "", "FuseTransposeReshapeOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer": [[331, 1, 1, "", "GraphCseOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass": [[332, 1, 1, "", "GrapplerOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node": [[334, 1, 1, "", "InsertPrintMinMaxNode"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu": [[335, 1, 1, "", "MoveSqueezeAfterReluOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize": [[336, 1, 1, "", "PreOptimization"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes": [[337, 1, 1, "", "RemoveTrainingNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm": [[338, 1, 1, "", "RenameBatchNormOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input": [[339, 1, 1, "", "SplitSharedInputOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes": [[340, 1, 1, "", "StripEquivalentNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes": [[341, 1, 1, "", "StripUnusedNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer": [[342, 1, 1, "", "SwitchOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base": [[343, 1, 1, "", "GraphRewriterBase"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8": [[345, 0, 0, "-", "freeze_fake_quant"], [346, 0, 0, "-", "freeze_value"], [347, 0, 0, "-", "fuse_conv_redundant_dequantize"], [348, 0, 0, "-", "fuse_conv_requantize"], [349, 0, 0, "-", "fuse_matmul_redundant_dequantize"], [350, 0, 0, "-", "fuse_matmul_requantize"], [352, 0, 0, "-", "meta_op_optimizer"], [353, 0, 0, "-", "post_hostconst_converter"], [354, 0, 0, "-", "post_quantized_op_cse"], [355, 0, 0, "-", "scale_propagation"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant": [[345, 1, 1, "", "FreezeFakeQuantOpOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value": [[346, 1, 1, "", "FreezeValueTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[347, 1, 1, "", "FuseConvRedundantDequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize": [[348, 1, 1, "", "FuseConvRequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[349, 1, 1, "", "FuseMatMulRedundantDequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize": [[350, 1, 1, "", "FuseMatMulRequantizeDequantizeNewAPITransformer"], [350, 1, 1, "", "FuseMatMulRequantizeDequantizeTransformer"], [350, 1, 1, "", "FuseMatMulRequantizeNewAPITransformer"], [350, 1, 1, "", "FuseMatMulRequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer": [[352, 1, 1, "", "MetaInfoChangingMemOpOptimizer"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter": [[353, 1, 1, "", "PostHostConstConverter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse": [[354, 1, 1, "", "PostCseOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation": [[355, 1, 1, "", "ScaleProPagationTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq": [[357, 0, 0, "-", "insert_qdq_pattern"], [358, 0, 0, "-", "merge_duplicated_qdq"], [359, 0, 0, "-", "share_qdq_y_pattern"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern": [[357, 1, 1, "", "GenerateGraphWithQDQPattern"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq": [[358, 1, 1, "", "MergeDuplicatedQDQOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern": [[359, 1, 1, "", "ShareQDQForItexYPatternOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_util": [[360, 1, 1, "", "GraphAnalyzer"], [360, 1, 1, "", "GraphRewriterHelper"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph": [[370, 0, 0, "-", "qdq"], [372, 0, 0, "-", "quantize_graph_base"], [373, 0, 0, "-", "quantize_graph_bn"], [374, 0, 0, "-", "quantize_graph_concatv2"], [375, 0, 0, "-", "quantize_graph_conv"], [376, 0, 0, "-", "quantize_graph_for_intel_cpu"], [377, 0, 0, "-", "quantize_graph_matmul"], [378, 0, 0, "-", "quantize_graph_pooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq": [[363, 0, 0, "-", "fuse_qdq_bn"], [364, 0, 0, "-", "fuse_qdq_concatv2"], [365, 0, 0, "-", "fuse_qdq_conv"], [366, 0, 0, "-", "fuse_qdq_deconv"], [367, 0, 0, "-", "fuse_qdq_in"], [368, 0, 0, "-", "fuse_qdq_matmul"], [369, 0, 0, "-", "fuse_qdq_pooling"], [371, 0, 0, "-", "optimize_qdq"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn": [[363, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2": [[364, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv": [[365, 1, 1, "", "FuseNodeStartWithConv2d"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv": [[366, 1, 1, "", "FuseNodeStartWithDeconv2d"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in": [[367, 1, 1, "", "FuseNodeStartWithFusedInstanceNorm"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul": [[368, 1, 1, "", "FuseNodeStartWithMatmul"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling": [[369, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq": [[371, 1, 1, "", "OptimizeQDQGraph"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base": [[372, 1, 1, "", "QuantizeGraphBase"], [372, 1, 1, "", "QuantizeNodeBase"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn": [[373, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2": [[374, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv": [[375, 1, 1, "", 
"FuseNodeStartWithConv2d"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu": [[376, 1, 1, "", "QuantizeGraphForIntel"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul": [[377, 1, 1, "", "FuseNodeStartWithMatmul"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling": [[378, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph_common": [[379, 1, 1, "", "QuantizeGraphHelper"]], "neural_compressor.tensorflow.quantization.utils.transform_graph": [[380, 0, 0, "-", "bias_correction"], [381, 0, 0, "-", "graph_transform_base"], [383, 0, 0, "-", "insert_logging"], [384, 0, 0, "-", "rerange_quantized_concat"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction": [[380, 1, 1, "", "BiasCorrection"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base": [[381, 1, 1, "", "GraphTransformBase"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging": [[383, 1, 1, "", "InsertLogging"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat": [[384, 1, 1, "", "RerangeQuantizedConcat"]], "neural_compressor.tensorflow.quantization.utils.utility": [[385, 2, 1, "", "apply_inlining"], [385, 2, 1, "", "collate_tf_preds"], [385, 2, 1, "", "construct_function_from_graph_def"], [385, 2, 1, "", "fix_ref_type_of_graph_def"], [385, 2, 1, "", "generate_feed_dict"], [385, 2, 1, "", "get_graph_def"], [385, 2, 1, "", "get_input_output_node_names"], [385, 2, 1, "", "get_model_input_shape"], [385, 2, 1, "", "get_tensor_by_name"], [385, 2, 1, "", "is_ckpt_format"], [385, 2, 1, "", "is_saved_model_format"], [385, 2, 1, "", "iterator_sess_run"], [385, 2, 1, "", "parse_saved_model"], [385, 2, 1, "", "read_graph"], [385, 2, 1, "", "reconstruct_saved_model"], [385, 2, 1, "", "strip_equivalent_nodes"], [385, 2, 1, "", "strip_unused_nodes"], [385, 2, 1, "", "write_graph"]], "neural_compressor.tensorflow.utils": [[386, 0, 0, "-", "constants"], [387, 0, 0, "-", "data"], [389, 0, 0, "-", "model"], [390, 0, 0, "-", "model_wrappers"], [391, 0, 0, "-", "utility"]], "neural_compressor.tensorflow.utils.data": [[387, 1, 1, "", "BaseDataLoader"], [387, 1, 1, "", "BatchSampler"], [387, 1, 1, "", "DummyDataset"], [387, 1, 1, "", "DummyDatasetV2"], [387, 1, 1, "", "IndexFetcher"], [387, 1, 1, "", "IterableFetcher"], [387, 1, 1, "", "IterableSampler"], [387, 1, 1, "", "SequentialSampler"], [387, 2, 1, "", "default_collate"]], "neural_compressor.tensorflow.utils.model": [[389, 1, 1, "", "Model"], [389, 1, 1, "", "TensorflowGlobalConfig"]], "neural_compressor.tensorflow.utils.model_wrappers": [[390, 1, 1, "", "BaseModel"], [390, 1, 1, "", "KerasModel"], [390, 1, 1, "", "TensorflowBaseModel"], [390, 1, 1, "", "TensorflowCheckpointModel"], [390, 1, 1, "", "TensorflowLLMModel"], [390, 1, 1, "", "TensorflowModel"], [390, 1, 1, "", "TensorflowSavedModelModel"], [390, 2, 1, "", "checkpoint_session"], [390, 2, 1, "", "estimator_session"], [390, 2, 1, "", "frozen_pb_session"], [390, 2, 1, "", "get_model_type"], [390, 2, 1, "", "get_tf_model_type"], [390, 2, 1, "", "graph_def_session"], [390, 2, 1, "", "graph_session"], [390, 2, 1, "", "keras_session"], [390, 2, 1, "", "load_saved_model"], [390, 2, 1, "", "saved_model_session"], [390, 2, 1, "", "slim_session"], [390, 2, 1, "", "try_loading_keras"], [390, 2, 1, "", "validate_and_inference_input_output"], [390, 
2, 1, "", "validate_graph_node"]], "neural_compressor.tensorflow.utils.utility": [[391, 1, 1, "", "CaptureOutputToFile"], [391, 1, 1, "", "CpuInfo"], [391, 1, 1, "", "TFSlimNetsFactory"], [391, 2, 1, "", "combine_histogram"], [391, 2, 1, "", "deep_get"], [391, 2, 1, "", "disable_random"], [391, 2, 1, "", "dump_elapsed_time"], [391, 2, 1, "", "get_all_fp32_data"], [391, 2, 1, "", "get_tensor_histogram"], [391, 2, 1, "", "itex_installed"], [391, 2, 1, "", "register_algo"], [391, 2, 1, "", "singleton"], [391, 2, 1, "", "valid_keras_format"], [391, 2, 1, "", "version1_eq_version2"], [391, 2, 1, "", "version1_gt_version2"], [391, 2, 1, "", "version1_gte_version2"], [391, 2, 1, "", "version1_lt_version2"], [391, 2, 1, "", "version1_lte_version2"]], "neural_compressor.torch": [[394, 0, 0, "-", "algorithms"], [434, 0, 0, "-", "export"], [440, 0, 0, "-", "quantization"], [447, 0, 0, "-", "utils"]], "neural_compressor.torch.algorithms": [[392, 0, 0, "-", "base_algorithm"], [395, 0, 0, "-", "layer_wise"], [400, 0, 0, "-", "mixed_precision"], [402, 0, 0, "-", "mx_quant"], [407, 0, 0, "-", "pt2e_quant"], [410, 0, 0, "-", "smooth_quant"], [414, 0, 0, "-", "static_quant"], [428, 0, 0, "-", "weight_only"]], "neural_compressor.torch.algorithms.base_algorithm": [[392, 1, 1, "", "Quantizer"]], "neural_compressor.torch.algorithms.fp8_quant.utils": [[393, 0, 0, "-", "logger"]], "neural_compressor.torch.algorithms.layer_wise": [[396, 0, 0, "-", "load"], [397, 0, 0, "-", "modified_pickle"], [398, 0, 0, "-", "utils"]], "neural_compressor.torch.algorithms.layer_wise.load": [[396, 2, 1, "", "load"]], "neural_compressor.torch.algorithms.layer_wise.modified_pickle": [[397, 3, 1, "", "PickleError"], [397, 3, 1, "", "PicklingError"], [397, 3, 1, "", "UnpicklingError"]], "neural_compressor.torch.algorithms.layer_wise.utils": [[398, 1, 1, "", "QDQLayer"], [398, 2, 1, "", "clean_module_weight"], [398, 2, 1, "", "dowload_hf_model"], [398, 2, 1, "", "get_children"], [398, 2, 1, "", "get_module"], [398, 2, 1, "", "get_named_children"], [398, 2, 1, "", "get_super_module_by_name"], [398, 2, 1, "", "load_empty_model"], [398, 2, 1, "", "load_layer_wise_quantized_model"], [398, 2, 1, "", "load_module"], [398, 2, 1, "", "load_tensor"], [398, 2, 1, "", "load_tensor_from_shard"], [398, 2, 1, "", "load_value"], [398, 2, 1, "", "register_weight_hooks"], [398, 2, 1, "", "update_module"]], "neural_compressor.torch.algorithms.mixed_precision": [[399, 0, 0, "-", "half_precision_convert"], [401, 0, 0, "-", "module_wrappers"]], "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert": [[399, 1, 1, "", "HalfPrecisionConverter"]], "neural_compressor.torch.algorithms.mixed_precision.module_wrappers": [[401, 1, 1, "", "HalfPrecisionModuleWrapper"]], "neural_compressor.torch.algorithms.mx_quant": [[403, 0, 0, "-", "mx"], [404, 0, 0, "-", "utils"]], "neural_compressor.torch.algorithms.mx_quant.mx": [[403, 1, 1, "", "MXLinear"], [403, 1, 1, "", "MXQuantizer"]], "neural_compressor.torch.algorithms.mx_quant.utils": [[404, 1, 1, "", "ElemFormat"], [404, 1, 1, "", "RoundingMode"], [404, 2, 1, "", "quantize_elemwise_op"], [404, 2, 1, "", "quantize_mx_op"]], "neural_compressor.torch.algorithms.pt2e_quant": [[405, 0, 0, "-", "core"], [406, 0, 0, "-", "half_precision_rewriter"], [408, 0, 0, "-", "save_load"], [409, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.pt2e_quant.core": [[405, 1, 1, "", "W8A8PT2EQuantizer"]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter": [[406, 1, 1, "", "PatternPair"], 
[406, 2, 1, "", "apply_single_pattern_pair"], [406, 2, 1, "", "get_filter_fn"], [406, 2, 1, "", "get_half_precision_node_set"], [406, 2, 1, "", "get_unquantized_node_set"], [406, 2, 1, "", "pattern_factory"], [406, 2, 1, "", "transformation"]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair": [[406, 4, 1, "", "fn"], [406, 4, 1, "", "replace_pattern"], [406, 4, 1, "", "search_pattern"]], "neural_compressor.torch.algorithms.pt2e_quant.save_load": [[408, 2, 1, "", "load"], [408, 2, 1, "", "save"]], "neural_compressor.torch.algorithms.pt2e_quant.utility": [[409, 2, 1, "", "create_quant_spec_from_config"], [409, 2, 1, "", "create_xiq_quantizer_from_pt2e_config"]], "neural_compressor.torch.algorithms.smooth_quant": [[411, 0, 0, "-", "save_load"], [412, 0, 0, "-", "smooth_quant"], [413, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.smooth_quant.save_load": [[411, 2, 1, "", "recover_model_from_json"]], "neural_compressor.torch.algorithms.smooth_quant.smooth_quant": [[412, 1, 1, "", "SmoothQuantQuantizer"], [412, 2, 1, "", "qdq_quantize"]], "neural_compressor.torch.algorithms.smooth_quant.utility": [[413, 1, 1, "", "AutoAlpha"], [413, 1, 1, "", "Calibration"], [413, 1, 1, "", "GraphTrace"], [413, 1, 1, "", "SQLinearWrapper"], [413, 1, 1, "", "TorchSmoothQuant"], [413, 1, 1, "", "WrapperLayer"], [413, 2, 1, "", "build_captured_dataloader"], [413, 2, 1, "", "cal_scale"], [413, 2, 1, "", "cfg_to_qconfig"], [413, 2, 1, "", "check_cfg_and_qconfig"], [413, 2, 1, "", "dump_model_op_stats"], [413, 2, 1, "", "enough_memo_store_scale"], [413, 2, 1, "", "forward_wrapper"], [413, 2, 1, "", "get_module"], [413, 2, 1, "", "get_parent"], [413, 2, 1, "", "get_quantizable_ops_recursively"], [413, 2, 1, "", "model_forward"], [413, 2, 1, "", "model_forward_per_sample"], [413, 2, 1, "", "move_input_to_device"], [413, 2, 1, "", "quant_dequant_w_v1"], [413, 2, 1, "", "quant_dequant_x_v1"], [413, 2, 1, "", "register_autotune"], [413, 2, 1, "", "reshape_in_channel_to_last"], [413, 2, 1, "", "reshape_scale_as_input"], [413, 2, 1, "", "reshape_scale_as_weight"], [413, 2, 1, "", "set_module"], [413, 2, 1, "", "update_sq_scale"]], "neural_compressor.torch.algorithms.static_quant": [[415, 0, 0, "-", "save_load"], [416, 0, 0, "-", "static_quant"], [417, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.static_quant.save_load": [[415, 2, 1, "", "load"], [415, 2, 1, "", "save"]], "neural_compressor.torch.algorithms.static_quant.static_quant": [[416, 1, 1, "", "StaticQuantQuantizer"]], "neural_compressor.torch.algorithms.static_quant.utility": [[417, 1, 1, "", "TransformerBasedModelBlockPatternDetector"], [417, 2, 1, "", "cfg_to_qconfig"], [417, 2, 1, "", "check_cfg_and_qconfig"], [417, 2, 1, "", "dump_model_op_stats"], [417, 2, 1, "", "generate_activation_observer"], [417, 2, 1, "", "generate_xpu_qconfig"], [417, 2, 1, "", "get_depth"], [417, 2, 1, "", "get_dict_at_depth"], [417, 2, 1, "", "get_element_under_depth"], [417, 2, 1, "", "get_quantizable_ops_from_cfgs"], [417, 2, 1, "", "get_quantizable_ops_recursively"], [417, 2, 1, "", "parse_cfgs"], [417, 2, 1, "", "simple_inference"]], "neural_compressor.torch.algorithms.weight_only": [[418, 0, 0, "-", "autoround"], [419, 0, 0, "-", "awq"], [420, 0, 0, "-", "gptq"], [424, 0, 0, "-", "hqq"], [429, 0, 0, "-", "modules"], [430, 0, 0, "-", "rtn"], [431, 0, 0, "-", "save_load"], [432, 0, 0, "-", "teq"], [433, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.weight_only.autoround": [[418, 1, 1, "", 
"AutoRoundQuantizer"], [418, 2, 1, "", "get_dataloader"]], "neural_compressor.torch.algorithms.weight_only.awq": [[419, 1, 1, "", "AWQQuantizer"]], "neural_compressor.torch.algorithms.weight_only.gptq": [[420, 1, 1, "", "GPTQ"], [420, 1, 1, "", "GPTQuantizer"], [420, 1, 1, "", "Quantizer"], [420, 1, 1, "", "RAWGPTQuantizer"], [420, 2, 1, "", "find_layers"], [420, 2, 1, "", "find_layers_name"], [420, 2, 1, "", "is_leaf"], [420, 2, 1, "", "log_quantizable_layers_per_transformer"], [420, 2, 1, "", "trace_gptq_target_blocks"]], "neural_compressor.torch.algorithms.weight_only.hqq": [[421, 0, 0, "-", "bitpack"], [422, 0, 0, "-", "config"], [423, 0, 0, "-", "core"], [425, 0, 0, "-", "optimizer"], [426, 0, 0, "-", "qtensor"], [427, 0, 0, "-", "quantizer"]], "neural_compressor.torch.algorithms.weight_only.hqq.bitpack": [[421, 1, 1, "", "Packer"]], "neural_compressor.torch.algorithms.weight_only.hqq.config": [[422, 1, 1, "", "HQQModuleConfig"], [422, 1, 1, "", "QTensorConfig"]], "neural_compressor.torch.algorithms.weight_only.hqq.core": [[423, 1, 1, "", "HQQLinear"], [423, 1, 1, "", "HQQTensorHandle"]], "neural_compressor.torch.algorithms.weight_only.hqq.optimizer": [[425, 2, 1, "", "optimize_weights_proximal_legacy"]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor": [[426, 1, 1, "", "QTensor"], [426, 1, 1, "", "QTensorMetaInfo"]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo": [[426, 4, 1, "", "axis"], [426, 4, 1, "", "group_size"], [426, 4, 1, "", "nbits"], [426, 4, 1, "", "packing"], [426, 4, 1, "", "shape"]], "neural_compressor.torch.algorithms.weight_only.hqq.quantizer": [[427, 1, 1, "", "HQQuantizer"], [427, 2, 1, "", "filter_fn"], [427, 2, 1, "", "patch_hqq_moduile"], [427, 2, 1, "", "replacement_fn"]], "neural_compressor.torch.algorithms.weight_only.modules": [[429, 1, 1, "", "FakeAffineTensorQuantFunction"], [429, 1, 1, "", "HPUWeightOnlyLinear"], [429, 1, 1, "", "INCWeightOnlyLinear"], [429, 1, 1, "", "MulLinear"], [429, 1, 1, "", "QDQLayer"], [429, 1, 1, "", "TEQLinearFakeQuant"], [429, 1, 1, "", "UnpackedWeightOnlyLinearParams"], [429, 1, 1, "", "WeightOnlyLinear"]], "neural_compressor.torch.algorithms.weight_only.rtn": [[430, 1, 1, "", "RTNQuantizer"]], "neural_compressor.torch.algorithms.weight_only.save_load": [[431, 1, 1, "", "WOQModelLoader"], [431, 2, 1, "", "load"], [431, 2, 1, "", "save"]], "neural_compressor.torch.algorithms.weight_only.teq": [[432, 1, 1, "", "TEQuantizer"], [432, 1, 1, "", "TrainableEquivalentTransformation"]], "neural_compressor.torch.algorithms.weight_only.utility": [[433, 1, 1, "", "GraphTrace"], [433, 2, 1, "", "fetch_module"], [433, 2, 1, "", "forward_wrapper"], [433, 2, 1, "", "get_absorb_layers"], [433, 2, 1, "", "get_block_prefix"], [433, 2, 1, "", "get_module"], [433, 2, 1, "", "get_module_input_output"], [433, 2, 1, "", "get_parent"], [433, 2, 1, "", "model_forward"], [433, 2, 1, "", "move_input_to_device"], [433, 2, 1, "", "qdq_weight_actor"], [433, 2, 1, "", "qdq_weight_asym"], [433, 2, 1, "", "qdq_weight_sym"], [433, 2, 1, "", "quant_tensor"], [433, 2, 1, "", "quant_weight_w_scale"], [433, 2, 1, "", "quantize_4bit"], [433, 2, 1, "", "recover_forward"], [433, 2, 1, "", "replace_forward"], [433, 2, 1, "", "search_clip"], [433, 2, 1, "", "set_module"]], "neural_compressor.torch.export": [[435, 0, 0, "-", "pt2e_export"]], "neural_compressor.torch.export.pt2e_export": [[435, 2, 1, "", "export"], [435, 2, 1, "", "export_model_for_pt2e_quant"]], "neural_compressor.torch.quantization": [[437, 0, 0, "-", 
"algorithm_entry"], [438, 0, 0, "-", "autotune"], [439, 0, 0, "-", "config"], [441, 0, 0, "-", "load_entry"], [442, 0, 0, "-", "quantize"]], "neural_compressor.torch.quantization.algorithm_entry": [[437, 2, 1, "", "autoround_quantize_entry"], [437, 2, 1, "", "awq_quantize_entry"], [437, 2, 1, "", "fp8_entry"], [437, 2, 1, "", "gptq_entry"], [437, 2, 1, "", "hqq_entry"], [437, 2, 1, "", "mixed_precision_entry"], [437, 2, 1, "", "mx_quant_entry"], [437, 2, 1, "", "pt2e_dynamic_quant_entry"], [437, 2, 1, "", "pt2e_static_quant_entry"], [437, 2, 1, "", "rtn_entry"], [437, 2, 1, "", "smooth_quant_entry"], [437, 2, 1, "", "static_quant_entry"], [437, 2, 1, "", "teq_quantize_entry"]], "neural_compressor.torch.quantization.autotune": [[438, 2, 1, "", "autotune"], [438, 2, 1, "", "get_all_config_set"], [438, 2, 1, "", "get_rtn_double_quant_config_set"]], "neural_compressor.torch.quantization.config": [[439, 1, 1, "", "AWQConfig"], [439, 1, 1, "", "AutoRoundConfig"], [439, 1, 1, "", "DynamicQuantConfig"], [439, 1, 1, "", "FP8Config"], [439, 1, 1, "", "GPTQConfig"], [439, 1, 1, "", "HQQConfig"], [439, 1, 1, "", "MXQuantConfig"], [439, 1, 1, "", "MixedPrecisionConfig"], [439, 1, 1, "", "OperatorConfig"], [439, 1, 1, "", "RTNConfig"], [439, 1, 1, "", "SmoothQuantConfig"], [439, 1, 1, "", "StaticQuantConfig"], [439, 1, 1, "", "TEQConfig"], [439, 1, 1, "", "TorchBaseConfig"], [439, 2, 1, "", "get_all_registered_configs"], [439, 2, 1, "", "get_default_AutoRound_config"], [439, 2, 1, "", "get_default_awq_config"], [439, 2, 1, "", "get_default_double_quant_config"], [439, 2, 1, "", "get_default_dynamic_config"], [439, 2, 1, "", "get_default_fp8_config"], [439, 2, 1, "", "get_default_fp8_config_set"], [439, 2, 1, "", "get_default_gptq_config"], [439, 2, 1, "", "get_default_hqq_config"], [439, 2, 1, "", "get_default_mixed_precision_config"], [439, 2, 1, "", "get_default_mixed_precision_config_set"], [439, 2, 1, "", "get_default_mx_config"], [439, 2, 1, "", "get_default_rtn_config"], [439, 2, 1, "", "get_default_sq_config"], [439, 2, 1, "", "get_default_static_config"], [439, 2, 1, "", "get_default_teq_config"], [439, 2, 1, "", "get_woq_tuning_config"]], "neural_compressor.torch.quantization.load_entry": [[441, 2, 1, "", "load"]], "neural_compressor.torch.quantization.quantize": [[442, 2, 1, "", "convert"], [442, 2, 1, "", "finalize_calibration"], [442, 2, 1, "", "need_apply"], [442, 2, 1, "", "prepare"], [442, 2, 1, "", "quantize"]], "neural_compressor.torch.utils": [[443, 0, 0, "-", "auto_accelerator"], [444, 0, 0, "-", "bit_packer"], [445, 0, 0, "-", "constants"], [446, 0, 0, "-", "environ"], [448, 0, 0, "-", "utility"]], "neural_compressor.torch.utils.auto_accelerator": [[443, 1, 1, "", "AcceleratorRegistry"], [443, 1, 1, "", "Auto_Accelerator"], [443, 1, 1, "", "CPU_Accelerator"], [443, 1, 1, "", "CUDA_Accelerator"], [443, 1, 1, "", "HPU_Accelerator"], [443, 1, 1, "", "XPU_Accelerator"], [443, 2, 1, "", "auto_detect_accelerator"], [443, 2, 1, "", "register_accelerator"]], "neural_compressor.torch.utils.bit_packer": [[444, 2, 1, "", "pack_array_with_numba_b2_c16"], [444, 2, 1, "", "pack_array_with_numba_b2_c32"], [444, 2, 1, "", "pack_array_with_numba_b2_c64"], [444, 2, 1, "", "pack_array_with_numba_b2_c8"], [444, 2, 1, "", "pack_array_with_numba_b4_c16"], [444, 2, 1, "", "pack_array_with_numba_b4_c32"], [444, 2, 1, "", "pack_array_with_numba_b4_c64"], [444, 2, 1, "", "pack_array_with_numba_b4_c8"], [444, 2, 1, "", "pack_array_with_numba_b8_c16"], [444, 2, 1, "", "pack_array_with_numba_b8_c32"], [444, 2, 
1, "", "pack_array_with_numba_b8_c64"], [444, 2, 1, "", "pack_array_with_numba_b8_c8"], [444, 2, 1, "", "register_pack_func"]], "neural_compressor.torch.utils.constants": [[445, 1, 1, "", "LoadFormat"]], "neural_compressor.torch.utils.environ": [[446, 2, 1, "", "device_synchronize"], [446, 2, 1, "", "get_accelerator"], [446, 2, 1, "", "get_ipex_version"], [446, 2, 1, "", "get_torch_version"], [446, 2, 1, "", "is_hpex_available"], [446, 2, 1, "", "is_ipex_available"], [446, 2, 1, "", "is_ipex_imported"], [446, 2, 1, "", "is_package_available"], [446, 2, 1, "", "is_transformers_imported"]], "neural_compressor.torch.utils.utility": [[448, 2, 1, "", "dowload_hf_model"], [448, 2, 1, "", "dump_model_op_stats"], [448, 2, 1, "", "fetch_module"], [448, 2, 1, "", "get_double_quant_config_dict"], [448, 2, 1, "", "get_model_device"], [448, 2, 1, "", "get_model_info"], [448, 2, 1, "", "get_processor_type_from_user_config"], [448, 2, 1, "", "get_quantizer"], [448, 2, 1, "", "load_empty_model"], [448, 2, 1, "", "postprocess_model"], [448, 2, 1, "", "register_algo"], [448, 2, 1, "", "set_module"]], "neural_compressor.training": [[449, 1, 1, "", "CallBacks"], [449, 1, 1, "", "CompressionManager"], [449, 2, 1, "", "fit"], [449, 2, 1, "", "prepare_compression"]], "neural_compressor.transformers": [[451, 0, 0, "-", "utils"]], "neural_compressor.transformers.quantization": [[450, 0, 0, "-", "utils"]], "neural_compressor.transformers.utils": [[452, 0, 0, "-", "quantization_config"]], "neural_compressor.transformers.utils.quantization_config": [[452, 1, 1, "", "AutoRoundConfig"], [452, 1, 1, "", "AwqConfig"], [452, 1, 1, "", "GPTQConfig"], [452, 1, 1, "", "INCQuantizationConfigMixin"], [452, 1, 1, "", "QuantizationMethod"], [452, 1, 1, "", "RtnConfig"], [452, 1, 1, "", "TeqConfig"]], "neural_compressor.utils": [[453, 0, 0, "-", "collect_layer_histogram"], [454, 0, 0, "-", "constant"], [455, 0, 0, "-", "create_obj_from_config"], [456, 0, 0, "-", "export"], [461, 0, 0, "-", "kl_divergence"], [462, 0, 0, "-", "load_huggingface"], [463, 0, 0, "-", "logger"], [464, 0, 0, "-", "options"], [465, 0, 0, "-", "pytorch"], [466, 0, 0, "-", "utility"], [467, 0, 0, "-", "weights_details"]], "neural_compressor.utils.collect_layer_histogram": [[453, 1, 1, "", "LayerHistogramCollector"]], "neural_compressor.utils.create_obj_from_config": [[455, 2, 1, "", "create_dataloader"], [455, 2, 1, "", "create_dataset"], [455, 2, 1, "", "create_eval_func"], [455, 2, 1, "", "create_train_func"], [455, 2, 1, "", "get_algorithm"], [455, 2, 1, "", "get_func_from_config"], [455, 2, 1, "", "get_metrics"], [455, 2, 1, "", "get_postprocess"], [455, 2, 1, "", "get_preprocess"]], "neural_compressor.utils.export": [[457, 0, 0, "-", "qlinear2qdq"], [458, 0, 0, "-", "tf2onnx"], [459, 0, 0, "-", "torch2onnx"]], "neural_compressor.utils.export.qlinear2qdq": [[457, 2, 1, "", "check_model"], [457, 2, 1, "", "onnx_qlinear_to_qdq"]], "neural_compressor.utils.export.tf2onnx": [[458, 2, 1, "", "tf_to_fp32_onnx"], [458, 2, 1, "", "tf_to_int8_onnx"]], "neural_compressor.utils.export.torch2onnx": [[459, 2, 1, "", "dynamic_quant_export"], [459, 2, 1, "", "get_node_mapping"], [459, 2, 1, "", "get_quantizable_onnx_ops"], [459, 2, 1, "", "static_quant_export"], [459, 2, 1, "", "torch_to_fp32_onnx"], [459, 2, 1, "", "torch_to_int8_onnx"]], "neural_compressor.utils.kl_divergence": [[461, 1, 1, "", "KL_Divergence"]], "neural_compressor.utils.load_huggingface": [[462, 1, 1, "", "OptimizedModel"], [462, 2, 1, "", "export_compressed_model"], [462, 2, 1, "", 
"save_for_huggingface_upstream"]], "neural_compressor.utils.logger": [[463, 1, 1, "", "Logger"], [463, 2, 1, "", "debug"], [463, 2, 1, "", "error"], [463, 2, 1, "", "fatal"], [463, 2, 1, "", "info"], [463, 2, 1, "", "log"], [463, 2, 1, "", "warn"], [463, 2, 1, "", "warning"]], "neural_compressor.utils.options": [[464, 1, 1, "", "onnxrt"]], "neural_compressor.utils.pytorch": [[465, 2, 1, "", "is_int8_model"], [465, 2, 1, "", "load"], [465, 2, 1, "", "load_weight_only"], [465, 2, 1, "", "recover_model_from_json"]], "neural_compressor.utils.utility": [[466, 1, 1, "", "CaptureOutputToFile"], [466, 1, 1, "", "CpuInfo"], [466, 2, 1, "", "Dequantize"], [466, 1, 1, "", "DotDict"], [466, 1, 1, "", "GLOBAL_STATE"], [466, 1, 1, "", "LazyImport"], [466, 1, 1, "", "MODE"], [466, 1, 1, "", "OpEntry"], [466, 1, 1, "", "Statistics"], [466, 2, 1, "", "alias_param"], [466, 2, 1, "", "calculate_mse"], [466, 2, 1, "", "check_key_exist"], [466, 2, 1, "", "combine_histogram"], [466, 2, 1, "", "compare_objects"], [466, 2, 1, "", "compute_sparsity"], [466, 2, 1, "", "deep_get"], [466, 2, 1, "", "deep_set"], [466, 2, 1, "", "dequantize_weight"], [466, 2, 1, "", "dump_class_attrs"], [466, 2, 1, "", "dump_data_to_local"], [466, 2, 1, "", "dump_elapsed_time"], [466, 2, 1, "", "dump_table"], [466, 2, 1, "", "dump_table_to_csv"], [466, 2, 1, "", "equal_dicts"], [466, 2, 1, "", "fault_tolerant_file"], [466, 2, 1, "", "get_all_fp32_data"], [466, 2, 1, "", "get_number_of_sockets"], [466, 2, 1, "", "get_op_list"], [466, 2, 1, "", "get_size"], [466, 2, 1, "", "get_tensor_histogram"], [466, 2, 1, "", "get_tensors_info"], [466, 2, 1, "", "get_tuning_history"], [466, 2, 1, "", "get_weights_details"], [466, 2, 1, "", "load_data_from_pkl"], [466, 2, 1, "", "mse_metric_gap"], [466, 2, 1, "", "print_op_list"], [466, 2, 1, "", "print_table"], [466, 2, 1, "", "recover"], [466, 2, 1, "", "set_random_seed"], [466, 2, 1, "", "set_resume_from"], [466, 2, 1, "", "set_tensorboard"], [466, 2, 1, "", "set_workspace"], [466, 2, 1, "", "show_memory_info"], [466, 2, 1, "", "singleton"], [466, 2, 1, "", "str2array"], [466, 2, 1, "", "time_limit"], [466, 2, 1, "", "version1_eq_version2"], [466, 2, 1, "", "version1_gt_version2"], [466, 2, 1, "", "version1_gte_version2"], [466, 2, 1, "", "version1_lt_version2"], [466, 2, 1, "", "version1_lte_version2"]], "neural_compressor.utils.weights_details": [[467, 1, 1, "", "WeightsDetails"], [467, 1, 1, "", "WeightsStatistics"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "function", "Python function"], "3": ["py", "exception", "Python exception"], "4": ["py", "attribute", "Python attribute"], "5": ["py", "data", "Python data"]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:function", "3": "py:exception", "4": "py:attribute", "5": "py:data"}, "terms": {"": [30, 31, 90, 128, 133, 140, 150, 152, 173, 174, 175, 179, 180, 181, 182, 185, 187, 192, 194, 195, 208, 211, 224, 225, 232, 234, 235, 239, 262, 380, 385, 391, 396, 413, 431, 433, 441, 466, 470, 471, 472, 474, 475, 476, 477, 479, 481, 483, 488, 490, 491, 496, 497, 522, 526, 528, 529, 530, 531, 533, 538, 539, 543, 544, 545, 546, 549, 550, 551, 552, 554, 555], "0": [3, 30, 31, 39, 75, 90, 133, 139, 140, 145, 147, 149, 152, 153, 154, 163, 169, 171, 192, 195, 200, 202, 204, 209, 212, 213, 216, 221, 225, 234, 245, 262, 267, 281, 303, 311, 385, 387, 396, 413, 417, 425, 433, 439, 443, 452, 459, 462, 473, 474, 475, 477, 480, 482, 483, 486, 488, 489, 492, 494, 504, 522, 523, 526, 528, 529, 530, 
531, 534, 535, 536, 537, 538, 539, 541, 544, 546, 547, 549, 550, 551, 552, 553, 554], "00": [536, 555], "000": [211, 494], "0000": 540, "00000": 211, "00001": 211, "00003": 211, "00004": 211, "0001": [3, 195, 538], "0003": [536, 555], "0004": 538, "0005": 536, "0006": 536, "0007": 555, "0008": 536, "001": [211, 488, 526, 552], "0016": 536, "002": 526, "0021": 536, "0025": 536, "0029": [488, 552, 555], "00296431384049356": [488, 552], "0030": 555, "0036": [488, 552], "0040": 536, "0043": 536, "0046": [536, 555], "005": 549, "0051": 536, "0058": 555, "0059755356051027775": [488, 552], "0061": 536, "006533813662827015": [488, 552], "00774": 544, "0081": 555, "0086": [488, 552], "0097": 536, "00978": [439, 477, 488, 549], "00x": 555, "01": [31, 153, 195, 425, 439, 477, 536, 538, 549, 554, 555], "0106": 555, "0115": 555, "0119": 555, "0130": 555, "0142": 536, "017": 553, "01x": 555, "02": [195, 555], "0201": 536, "0213": 555, "02340": 169, "027": 555, "02x": 555, "03": [536, 555], "0352": 536, "03x": 555, "04": [494, 534, 555], "04191": 521, "04x": 555, "05": [150, 195, 234, 413, 536, 537, 552, 555], "0500": [488, 552], "05516": [439, 477, 488], "0559": 536, "05754": 544, "05x": 555, "06": 555, "0698": [488, 552], "06x": 555, "07": [488, 494, 552, 555], "0734": 555, "0737": [488, 552], "07x": 555, "08": [225, 536, 553, 555], "0806": [488, 552], "0837": 551, "09": [536, 555], "09656": 187, "099": 211, "09x": 555, "0f": 526, "0x": [140, 396], "0x2b000590": 555, "1": [3, 29, 30, 31, 40, 59, 74, 90, 94, 98, 131, 133, 140, 142, 145, 147, 149, 150, 151, 153, 154, 163, 169, 181, 182, 184, 195, 200, 202, 204, 208, 209, 210, 212, 213, 216, 217, 221, 225, 230, 231, 232, 234, 245, 262, 280, 281, 284, 292, 294, 298, 312, 331, 346, 359, 383, 385, 387, 391, 396, 413, 417, 418, 420, 425, 429, 433, 439, 441, 452, 455, 462, 466, 470, 472, 473, 474, 475, 476, 477, 479, 480, 481, 482, 483, 486, 488, 490, 492, 494, 495, 497, 521, 523, 528, 530, 534, 536, 537, 539, 541, 542, 543, 544, 546, 547, 549, 550, 551, 552, 553, 554], "10": [195, 211, 266, 425, 482, 483, 494, 520, 528, 534, 538, 544, 553, 554, 555], "100": [3, 151, 152, 153, 195, 211, 281, 301, 302, 305, 443, 481, 488, 489, 520, 526, 532, 534, 538, 546, 553, 554, 555], "1000": 538, "10000": [266, 544], "10004": [474, 477, 478, 483, 489, 492, 495, 523, 525, 526, 530, 533, 539, 541, 543, 547, 549], "10005": [541, 547], "10006": 525, "10016": [488, 552], "100x": [477, 488, 549], "101": [209, 234, 488, 537, 552, 555], "102": 555, "1024": [131, 211, 383], "10271": [473, 541], "10281": [473, 541], "103": 553, "1034": 555, "10438": [477, 488, 549, 552], "1048": 555, "10537": [473, 541], "106": 555, "107": 555, "1076": 555, "10833": 225, "1091": 555, "10k": [418, 452, 527], "10x": 555, "11": [474, 488, 534, 537, 539, 552, 554, 555], "1106": 555, "1114": 555, "1115": 555, "1121": 555, "1135": 555, "1136": 555, "1137": 555, "116": 553, "1188": 555, "119": [488, 552], "12": [536, 555], "120": [488, 552, 555], "1202": 555, "1205": 555, "121": 555, "123": [211, 553, 555], "1234": 555, "1236": 555, "1237": 555, "124m": [475, 552], "125": 555, "1259": 555, "125m": [475, 531, 552, 555], "126": 555, "127": [212, 213, 387, 488, 546, 552], "128": [3, 31, 145, 209, 212, 213, 225, 387, 418, 420, 439, 452, 477, 482, 488, 538, 546, 549, 553], "1285": 555, "12b": 536, "13": [488, 529, 534, 552, 555], "130": 555, "1307": 555, "132": 555, "13325": [477, 488, 549, 552], "1344": 555, "135": 555, "1365": [225, 553, 555], "1381": [488, 552], "1388": 555, "139": [488, 552], "13b": [475, 
536, 552], "13x": 555, "14": [195, 458, 459, 528, 534, 555], "14314": [477, 488, 549], "144": 555, "1445": 555, "146": 555, "147": 555, "148": 555, "1495": 555, "15": [133, 385, 530, 534, 536], "150": 555, "1506": 555, "151": 555, "1510": [488, 552], "152": 555, "153": 555, "1535": 555, "1547": 555, "156": 555, "1564": 555, "1574": 555, "1583": [488, 552], "15x": 555, "16": [444, 474, 534, 549], "1601": [488, 552], "161": 555, "162": [488, 552, 555], "164": 555, "1644": 555, "16599": [488, 552], "169": 555, "16x": 555, "16x32gb": 555, "17": [486, 494, 534, 536, 555], "1707": 555, "1717": 555, "172": [488, 552, 555], "1732": 555, "17323": [420, 439, 477, 488, 549], "1742": [488, 552], "1749": [488, 552], "175": 555, "17509": [488, 552], "1751": [488, 552], "176": 555, "177": [232, 555], "178": 555, "17x": 555, "18": [486, 534, 536, 545], "1809": 521, "1810": 169, "1818": 555, "182": 555, "1842": 555, "18518": 135, "18529": 135, "187": 555, "1873": 555, "1879": 555, "1890": [488, 552], "1891": 555, "18x": 555, "19": 555, "192": [488, 552], "192795": 522, "193": 555, "195": 555, "1978": 195, "1983354538679123": [488, 552], "1988": 555, "199": 555, "1998": 555, "1_11_capabl": 497, "1b7": [475, 536, 552], "1e": [150, 413, 488, 552], "1e1": 425, "1s4c14ins1bsthroughput": 555, "1x": 555, "1x1": [538, 544], "1x2": [488, 552], "1xchannel": [195, 544], "2": [29, 30, 31, 39, 59, 90, 94, 140, 147, 149, 151, 153, 154, 175, 181, 182, 195, 210, 230, 232, 234, 245, 266, 281, 297, 311, 331, 359, 396, 413, 433, 441, 444, 466, 471, 472, 473, 475, 476, 477, 479, 480, 481, 482, 483, 484, 488, 489, 492, 494, 495, 504, 521, 523, 530, 531, 533, 534, 535, 536, 537, 539, 541, 542, 544, 545, 546, 547, 549, 550, 551, 552, 553, 554], "20": [225, 425, 544, 553, 555], "200": [224, 418, 439, 452, 477, 492, 554], "2000": 538, "2001": 209, "2011": 521, "2012": 211, "2017": 521, "2018": 521, "2019": 544, "2020": [135, 473, 541], "2021": 544, "2022": [195, 477, 488, 535, 544, 549, 552], "2023": [473, 477, 488, 494, 541, 544, 549, 552], "2024": [494, 555], "203": 555, "2043": 209, "2048": [3, 391, 418, 420, 439, 452, 466, 477, 549], "205": 555, "2059": 555, "206": 555, "207": [488, 552], "2070": 555, "2079": 555, "20b": 536, "20x": 555, "21": [234, 488, 536, 545, 552, 555], "210": 555, "21020": [488, 552], "211": 555, "2111": 544, "213": 555, "2132": 551, "214": 555, "2170": 555, "2172": 555, "218": 555, "219": 555, "21x": 555, "22": [534, 536, 555], "2202": 555, "2204": 187, "2205301336": 555, "2209": [477, 488, 549, 552], "2210": [420, 439, 477, 488, 549], "2211": [477, 488, 549, 552], "2220": [488, 552], "224": [195, 221, 526, 528, 538, 553], "22444": [488, 552], "225": [221, 538], "2286": 555, "229": [221, 538], "22x": 555, "23": [154, 536, 555], "230": 555, "2301": 544, "2305": [477, 488, 549], "2306": [439, 477, 488, 549], "2309": [439, 477, 488], "2310": [473, 541], "2326": 555, "23f1": 555, "23ubuntu4": 555, "23x": 555, "24": [154, 483, 544, 554, 555], "24101": 544, "24116": 544, "242": 555, "2420": [488, 552], "2428": 555, "247": 555, "24x": 555, "25": [544, 555], "250": 544, "255": [488, 546, 552], "256": [216, 221, 439, 526, 538, 553], "2567": 555, "2570": [488, 552], "2578": 555, "25x": 555, "26": [536, 555], "26f1": 555, "26x": 555, "27": [529, 536, 555], "279": 555, "27x": 555, "28": [209, 210, 214, 555], "282": 555, "284": 555, "2847": 555, "28x": 555, "29": [536, 555], "294": 555, "2949": 555, "295": 555, "2970": [488, 552], "2991": [488, 552], "29x": 555, "2d": [55, 327, 488, 552], "2e5m2": 472, "2gb": [243, 
390], "2x": [527, 533], "2x1": [544, 555], "2x2": [488, 552], "2xlarg": 555, "3": [29, 31, 133, 140, 153, 154, 182, 195, 211, 225, 230, 281, 385, 396, 413, 441, 466, 473, 474, 476, 477, 479, 481, 482, 483, 488, 489, 492, 494, 504, 521, 522, 523, 526, 528, 530, 531, 534, 537, 538, 539, 541, 542, 544, 545, 546, 549, 550, 551, 552, 553, 554], "30": [225, 527, 533, 553, 555], "300": 544, "305": 555, "3087": 555, "30b": [475, 536, 552], "30x": 555, "31": [30, 536, 555], "311": 555, "313": 555, "31x": 555, "32": [31, 171, 280, 413, 418, 429, 433, 439, 444, 452, 473, 474, 481, 482, 526, 532, 541, 549, 555], "322": 555, "3253": [488, 552], "3254": 555, "32accuraci": 555, "32x": 555, "33": [135, 473, 536, 541, 555], "334": 555, "33x": 555, "34": [494, 536, 555], "3424": 555, "346": 555, "348": 555, "34f1": 555, "35": [536, 544, 555], "350": 555, "350m": [475, 552], "354": [475, 552], "3542": [475, 552], "35x": 555, "36": 555, "360": 555, "36x": 555, "37": [536, 555], "3707": 555, "3725": 555, "3740": [488, 552], "3757": [475, 552], "379": [475, 552], "37x": 555, "38": 555, "3804": [475, 552], "381": [544, 555], "3815": [488, 552], "384": [225, 553, 555], "3845": [488, 552], "3850": [488, 552], "385297635664756e": [488, 552], "3852e": [488, 552], "386": 555, "387": 555, "3887": [475, 552], "38x": 555, "39": 555, "3911": [488, 552], "3924": [488, 552], "393": 555, "3930": [475, 552], "394": 555, "3947": [475, 552], "395": 555, "396": 555, "397": 555, "399": 555, "39x": 555, "3b": [475, 536, 552], "3d": [55, 327, 488, 545, 552], "3dgan": 545, "3f": 483, "3rd": [474, 488, 539, 545, 546, 548], "3x": 529, "4": [31, 39, 142, 145, 151, 154, 175, 184, 195, 218, 221, 225, 227, 228, 230, 280, 281, 311, 418, 429, 433, 439, 444, 452, 471, 473, 475, 476, 477, 478, 483, 488, 489, 490, 494, 495, 497, 520, 526, 527, 530, 533, 534, 537, 538, 541, 544, 546, 549, 552, 553, 554, 555], "40": [536, 555], "401": 555, "402": 555, "404": [544, 555], "405": 555, "4055": [488, 552], "406": [538, 555], "407": 555, "40b": 536, "41": 555, "411": 555, "4149": [475, 552], "4172": [475, 552], "4199": 555, "41x": 555, "42": [195, 418, 439, 477, 555], "420": 533, "42x": 555, "43": [536, 555], "431": 555, "434": 555, "43x": 555, "44": 555, "442": 555, "4469": 551, "44x": 555, "45": [492, 536, 555], "4516": [475, 552], "4533": [475, 552], "456": 538, "457": 555, "45x": 555, "46": [536, 555], "461": 555, "4634": [475, 552], "46x": 555, "47": [154, 483, 536, 555], "4734": [488, 552], "4741": [488, 552], "4743": [488, 552], "47x": 555, "48": [154, 488, 552, 555], "4800": 555, "4828": [475, 552], "483": 555, "484": 281, "485": [538, 555], "48x": 555, "49": [536, 555], "4906": [475, 552], "492": 555, "4936": [475, 552], "494": 555, "498": 555, "4980": [475, 552], "499": 555, "4f": 522, "4k": 489, "4th": [474, 488, 536, 545, 546], "4x": [184, 488, 546], "4x1": [175, 195, 538, 544, 555], "5": [139, 149, 153, 163, 195, 213, 221, 234, 262, 303, 413, 417, 439, 475, 477, 480, 488, 495, 528, 530, 534, 537, 538, 544, 546, 549, 552, 553, 554, 555], "50": [232, 488, 496, 544, 552, 555], "5018": [475, 552], "5040": [488, 552], "5048": [475, 552], "505": 555, "5057": [475, 552], "50x": 555, "51": [536, 555], "512": [474, 477], "512gb": 555, "513": 555, "518": [475, 552], "5185": [475, 552], "52": 555, "520": 555, "526": 555, "529": 555, "52f1": 555, "52x": 555, "53": [536, 555], "530": 555, "5382": 555, "539": 555, "53x": 555, "54": 555, "541": 555, "5421": 555, "5436": [475, 552], "5443": [475, 552], "5444": [488, 552], "5494": 555, "54accuraci": 555, 
"54x": 555, "55": [536, 555], "5519": 555, "5523": 555, "5530": 555, "5540": 555, "5552": [475, 552], "5555": [488, 552], "556": 555, "558": 555, "5593": [475, 552], "55x": 555, "56": 555, "560m": [475, 552], "565": 555, "56be4db0acb8001400a502ec": 232, "56x": 555, "57": [488, 536, 552, 555], "5742": [475, 552], "576": 555, "5764": [475, 552], "5767": 555, "578": 555, "5789": [475, 552], "57x": 555, "58": [536, 555], "582": 555, "5826": [488, 552], "584": 555, "58x": 555, "59": [488, 536, 552, 555], "5972": [488, 552], "5977": [475, 552], "59f1": 555, "59x": 555, "5b": [475, 552], "5gb": 431, "5x": 545, "6": [195, 473, 475, 477, 480, 488, 530, 541, 546, 552, 554, 555], "60": 555, "600": 538, "602": 555, "6038": [488, 552], "6057": 555, "60x": 555, "61": [536, 555], "6113": 555, "6187": 555, "61accuraci": 555, "62": [536, 555], "6247": [475, 552], "626": 555, "6297": [475, 552], "62x": 555, "63": [536, 544, 555], "633": 555, "6354": 555, "6365": [475, 552], "637690492221736e": [488, 552], "6376e": [488, 552], "6392": [475, 552], "64": [225, 439, 444, 474, 482, 488, 536, 549, 552, 553, 555], "6404": [475, 552], "6426": 555, "6437": [475, 552], "6455": 555, "6481": [488, 552], "6499": [475, 552], "64x": 555, "65": 555, "6506": [488, 552], "6534": 555, "6542": [475, 552], "65421": 522, "655": [475, 552], "6569": [475, 552], "65b": [475, 552], "66": 555, "6621": [475, 552], "66b": [475, 552], "66x": 555, "67": [536, 555], "6718": [475, 552], "6735": [475, 552], "6739": 555, "6740": [475, 552], "6769": [475, 552], "67x": 555, "68": [536, 553, 555], "680": 555, "6804": [475, 552], "6814": [475, 552], "6821": [475, 488, 552], "6831": [475, 552], "6835": [488, 552], "6836": [488, 552], "6837": [488, 552], "6839": [488, 552], "684": 555, "6845": 555, "6848": [488, 552], "6866": [475, 552], "6872": [475, 552], "6883": [488, 552], "6895": [475, 552], "68x": 555, "69": 555, "6953": [475, 552], "6994": 552, "69x": 555, "6ap0": 555, "6b": [475, 536, 552], "6f": 526, "7": [29, 151, 154, 195, 245, 413, 425, 433, 475, 477, 480, 488, 497, 520, 529, 530, 538, 544, 546, 550, 552, 554, 555], "70": [536, 555], "702": 555, "7022": 555, "7025": 555, "7034": 555, "704": 555, "705": 555, "7058": 552, "707": 555, "708": 555, "70b": 536, "70x": 555, "71": [154, 536, 555], "711": 555, "7128": [475, 552], "714": 555, "7143": [475, 552], "7149": [475, 552], "715": 555, "7153": 555, "717": 555, "7174": [488, 552], "718": 555, "719": 555, "72": [154, 488, 536, 552, 555], "7221": [475, 552], "72x": 555, "73": [536, 555], "7323": 555, "7326": [475, 552], "7332": 552, "7335": 552, "7357": [475, 552], "7361": [475, 552], "7392": 552, "7398": [475, 552], "7399": 555, "73x": 555, "74": [536, 555], "7415": 555, "7440": [488, 552], "7442": 555, "7451": [488, 552], "749": 555, "7495": 551, "74x": 555, "75": [536, 544, 555], "754": 474, "755": 555, "7589": [488, 552], "7590": [475, 552], "75x": 555, "76": [536, 555], "7608": [488, 552], "7615": 552, "7627": [475, 552], "7632": 552, "7677": 552, "76x": 555, "77": [536, 555], "774m": [475, 552], "7759": [475, 552], "7772": [488, 552], "779": 555, "77x": 555, "78": [553, 555], "7840": [475, 552], "7895": 544, "79": [536, 555], "7908": [475, 552], "7957": [475, 552], "7965": 555, "798": 555, "799": 555, "79x": 555, "7b": [475, 484, 489, 494, 536, 552], "7b1": [475, 552], "8": [98, 140, 154, 195, 224, 396, 404, 413, 418, 433, 439, 444, 452, 471, 472, 473, 475, 477, 478, 488, 489, 492, 496, 521, 522, 530, 534, 541, 544, 545, 549, 552, 554, 555], "80": [529, 536, 547, 555], "800": [225, 
553], "8001": [1, 453], "801": 544, "8018": 555, "8025": 555, "8044": 555, "805": 549, "8074": 555, "8084": 555, "80x": 555, "81": 555, "816": 555, "8178": 555, "81x": 555, "82": [536, 555], "8207": [488, 552], "8213": 555, "8235": 555, "8246": [488, 552], "8256": 555, "8259": 555, "8266": 551, "8291": 551, "8294": 551, "8298": [488, 552], "8299": 551, "83": [488, 551, 552, 555], "8314": 555, "8363": 555, "837": 555, "8371": 555, "8372": 551, "8382": 555, "83x": 555, "84": [488, 552, 555], "840": 555, "841": 555, "8411": 555, "844": 555, "8480": 555, "84x": 555, "85": [488, 551, 552, 555], "853": 555, "858": 555, "85x": 555, "86": [536, 555], "8626": 555, "8684": 555, "86x": 555, "87": [221, 536, 555], "875": [221, 553], "8763": [488, 552], "8768": [488, 552], "8782": 555, "87f1": 555, "88": [529, 551, 555], "8814": 555, "89": [488, 536, 552, 555], "893": 555, "8993": 555, "89x": 555, "8b": 489, "8ghz": 555, "8x1": 195, "9": [169, 195, 475, 483, 489, 492, 534, 538, 544, 552, 554, 555], "90": [195, 536, 547, 555], "901": 555, "9048": 555, "9091": 555, "90f1": 555, "90x": 555, "91": [488, 549, 552, 555], "914": 555, "92": [536, 555], "927": 555, "92x": 555, "93": [488, 552, 555], "9301": [488, 552], "9308": [488, 552], "9391": 555, "94": [553, 555], "9403": 555, "947": 555, "948": 555, "94x": 555, "95": [75, 154, 234, 537, 555], "9521": 555, "9522": 555, "9527": [266, 538], "95top1": 555, "96": [536, 555], "96x": 555, "97": [536, 538, 555], "98": [195, 538, 544, 555], "9860": [488, 552], "9867": 536, "98x": 555, "99": [3, 153, 303, 555], "9907": 536, "9911": 536, "9915": 536, "9928": 536, "9930": 536, "9933": 536, "9945": 536, "9955": 536, "9957": 536, "9972": 536, "9975": 536, "9976": 536, "9984": 536, "9986": 536, "9987": 536, "9988": 536, "9989": 536, "999": [3, 303], "9990": 536, "9991": 536, "9992": 536, "9994": 536, "9995": 536, "9997": 536, "99ccff": 554, "99x": 555, "A": [3, 40, 59, 60, 88, 101, 125, 126, 133, 138, 145, 152, 153, 159, 161, 162, 169, 170, 173, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 195, 211, 225, 227, 230, 231, 232, 234, 235, 239, 243, 266, 283, 286, 312, 331, 332, 385, 389, 390, 391, 397, 404, 406, 409, 412, 413, 415, 425, 427, 433, 443, 449, 466, 475, 477, 478, 488, 491, 496, 522, 523, 527, 530, 536, 537, 538, 540, 544, 545, 546, 549, 552, 553, 554], "And": [55, 327, 481, 488, 491, 492, 520, 522, 538, 552], "As": [472, 477, 488, 492, 496, 523, 537, 538, 544, 549, 554], "At": [473, 491, 496, 541, 554], "Being": 490, "By": [140, 173, 195, 227, 396, 477, 484, 496, 497, 537, 540, 544, 545, 549, 554], "For": [29, 31, 145, 149, 153, 156, 160, 173, 174, 177, 178, 188, 195, 211, 225, 231, 232, 234, 235, 262, 413, 431, 433, 439, 466, 472, 475, 477, 478, 479, 480, 484, 488, 490, 492, 493, 495, 497, 525, 526, 528, 530, 531, 533, 544, 548, 549, 552, 554, 555], "IT": [211, 218, 545], "If": [52, 53, 55, 59, 140, 151, 153, 156, 195, 198, 199, 211, 225, 235, 262, 281, 324, 325, 327, 331, 396, 413, 431, 433, 435, 441, 448, 449, 452, 472, 478, 481, 488, 489, 491, 492, 496, 520, 522, 523, 526, 529, 534, 535, 537, 538, 542, 544, 546, 549, 551, 552, 553, 554], "In": [162, 177, 178, 179, 184, 185, 186, 189, 195, 208, 230, 245, 448, 470, 477, 478, 481, 488, 489, 490, 492, 494, 495, 496, 523, 525, 526, 537, 538, 542, 543, 544, 546, 548, 549, 551, 552, 553, 554], "It": [39, 140, 159, 162, 166, 175, 198, 199, 232, 234, 235, 262, 267, 271, 311, 396, 433, 442, 449, 473, 476, 477, 478, 479, 481, 482, 488, 489, 495, 496, 497, 521, 529, 531, 538, 
541, 544, 546, 549, 551, 554], "Its": [496, 521, 544], "NOT": [209, 530], "No": [413, 491, 522, 529, 538, 545], "Not": [277, 278, 391, 466, 472, 522], "ON": 548, "Of": 523, "On": [488, 546, 555], "One": [478, 497, 522, 526, 533, 544, 545, 554], "TO": 526, "The": [3, 21, 39, 59, 98, 104, 106, 125, 133, 140, 145, 146, 151, 152, 153, 155, 156, 158, 160, 161, 162, 163, 165, 169, 173, 175, 177, 178, 180, 181, 182, 187, 188, 189, 190, 191, 192, 195, 198, 199, 200, 209, 210, 211, 212, 213, 217, 218, 225, 227, 228, 229, 231, 232, 234, 235, 243, 245, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 278, 280, 281, 283, 284, 288, 292, 293, 294, 297, 298, 299, 301, 302, 305, 311, 331, 344, 361, 385, 386, 387, 388, 389, 390, 391, 392, 396, 405, 406, 407, 408, 409, 410, 412, 413, 414, 416, 417, 418, 422, 423, 425, 426, 427, 431, 433, 435, 437, 438, 439, 441, 442, 443, 448, 449, 453, 455, 461, 462, 465, 466, 467, 470, 471, 472, 473, 474, 476, 477, 478, 480, 481, 482, 488, 489, 490, 492, 495, 496, 497, 498, 500, 506, 509, 512, 520, 521, 522, 523, 526, 528, 530, 531, 532, 533, 534, 536, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555], "Their": 455, "Then": [230, 488, 496, 552, 554], "There": [100, 263, 265, 471, 476, 480, 488, 494, 522, 530, 533, 538, 549, 552, 554], "These": [496, 533], "To": [30, 230, 245, 392, 473, 474, 475, 476, 477, 480, 482, 483, 488, 491, 494, 496, 497, 522, 526, 539, 541, 544, 548, 549, 550, 552, 554], "Will": [128, 380, 441, 533], "With": [81, 245, 352, 481, 488, 492, 495, 497, 523, 534, 538, 542, 544, 545, 546, 551, 552, 554, 555], "_": [198, 199, 262, 474, 475, 477, 478, 481, 483, 488, 489, 492, 494, 495, 496, 521, 526, 528, 530, 534, 538, 539, 540, 542, 544, 546, 549, 550, 551, 552, 554], "__all__": 522, "__call__": 218, "__class__": 245, "__getitem__": [207, 211, 225, 523, 538], "__init__": [431, 441, 495, 522, 523, 537, 554], "__iter__": [207, 211, 523, 538], "__len__": [207, 211], "__name__": 245, "__str__": 452, "__version__": [1, 138, 397, 522], "_configset": 153, "_description_": [438, 478], "_epoch_ran": 162, "_generate_dataload": [200, 387], "_inductor": [471, 476], "_lossandgradi": 522, "_propagate_qconfig_help": 145, "_quantiz": 459, "_quantizedconv": [76, 347], "_quantizeddeconv": [76, 347], "_quantizedfusedbatchnorm": [108, 363], "_quantizedmatmul": [78, 79, 349, 350], "_saved_model": [133, 385], "_type_": [145, 462], "_use_new_zipfile_seri": 170, "a100cuda": 555, "aarch64": 520, "ab": [30, 169, 187, 420, 433, 439, 481, 488, 544, 546, 552], "abbrevi": 195, "abc": [98, 232, 495, 554], "abcadaptor": 495, "abctunestrategi": 554, "abil": [473, 495, 496, 530, 541, 552], "abl": [198, 199, 235, 262, 449, 481, 488, 546], "abound": 545, "about": [169, 191, 466, 470, 472, 490, 522, 523, 538, 544, 548, 554, 555], "abov": [39, 311, 475, 476, 477, 479, 488, 495, 496, 497, 544, 546, 552, 554], "absolut": [169, 195, 234, 413, 473, 488, 537, 538, 541, 544, 546, 554], "absorb": [145, 149, 413, 433, 477, 549], "absorb_layer_dict": [419, 439, 452], "absorb_pair": 31, "absorb_to_lay": [145, 413, 432, 433, 439], "absorbed_1": [145, 433], "absorpt": [145, 433], "abstract": [71, 162, 225, 343, 495, 497, 532], "abus": 490, "ac": 266, "acc": [474, 479, 536, 544, 555], "acceler": [171, 439, 443, 446, 472, 474, 475, 477, 478, 480, 488, 489, 494, 534, 538, 544, 545, 548, 549, 552, 554], "accelerator_execution_tim": 251, "acceleratorregistri": 443, "accept": [195, 281, 482, 489, 490, 520, 522, 523, 549], "access": [52, 53, 55, 195, 324, 325, 
327, 462, 466], "accommod": [496, 497], "accompani": [535, 554], "accord": [1, 107, 145, 151, 195, 209, 210, 211, 225, 269, 271, 392, 442, 448, 455, 472, 481, 482, 488, 495, 526, 544, 548, 552, 553, 554], "accordingli": 552, "account": [490, 551, 554], "accumul": [277, 473, 477, 541, 554], "accur": [29, 149, 413, 420, 439, 476, 477, 488, 544, 549, 552], "accuraci": [31, 151, 153, 192, 195, 198, 199, 234, 235, 245, 262, 267, 281, 449, 473, 475, 476, 477, 481, 482, 484, 492, 494, 496, 497, 521, 523, 525, 526, 527, 528, 531, 537, 538, 541, 542, 543, 544, 545, 549, 550, 551, 552, 555], "accuracy_criterion": [195, 245, 538, 542, 554], "accuracy_level": 31, "accuracycriterion": [195, 538, 554], "accuracyspars": 555, "achiev": [475, 476, 477, 478, 479, 482, 484, 488, 489, 528, 536, 538, 543, 544, 545, 546, 552, 554], "acq": 266, "acq_max": 266, "acquisit": 266, "across": [235, 392, 473, 482, 524, 531, 538, 541, 544, 547, 552, 554], "act": [192, 207, 387, 490], "act_algo": [413, 439, 476], "act_algorithm": 303, "act_bit": [418, 439], "act_dtyp": [299, 303, 439, 473, 475, 476, 479, 541], "act_dynam": [418, 439], "act_fn": 174, "act_granular": [299, 303, 439, 479], "act_group_s": [418, 439], "act_max_valu": [292, 293, 294, 297, 298], "act_maxabs_hw_weights_pcs_maxabs_pow2": 472, "act_maxabs_pow2_weights_pcs_opt_pow2": 472, "act_min_valu": [292, 293, 294, 297, 298], "act_ord": [439, 477], "act_sym": [299, 303, 418, 439, 476, 479, 481, 482], "action": 490, "activ": [16, 31, 126, 145, 166, 195, 278, 280, 286, 292, 293, 294, 298, 413, 417, 439, 471, 472, 475, 476, 477, 480, 481, 488, 495, 496, 497, 521, 525, 530, 538, 546, 549, 552], "activation_max": 466, "activation_min": 466, "activation_min_max": 466, "activationoper": 5, "activity_regular": [292, 293, 294, 298], "actord": [31, 477, 549], "actual": [234, 476, 526, 529], "ad": [186, 209, 227, 266, 471, 474, 477, 496, 497, 539, 544, 549], "adadelta": 526, "adam": 165, "adamw": 165, "adapt": [473, 490, 496, 541, 545], "adaptor": [162, 195, 280, 288, 289, 455, 470, 497, 533, 547, 548, 550, 552], "adaptor_registri": 495, "add": [30, 38, 49, 94, 104, 133, 165, 192, 195, 209, 211, 278, 310, 321, 359, 385, 466, 470, 475, 491, 492, 494, 497, 523, 526, 528, 530, 537, 538, 551, 552, 554], "add_origin_loss": [163, 195], "add_port_to_nam": 90, "add_qdq_pair_to_weight": [28, 195, 546], "add_relu": 528, "addit": [145, 195, 431, 449, 477, 496, 497, 549, 550, 554], "addition": [477, 497, 526, 544, 554], "addn": 530, "address": [211, 474, 477, 482, 490, 522, 539, 545], "addv2": [38, 54, 310, 326, 530], "adher": 491, "adjust": [488, 546, 552], "adopt": [488, 491, 544, 545, 552], "advanc": [135, 195, 473, 474, 477, 479, 481, 488, 490, 496, 501, 536, 541, 544, 546, 549], "advantag": [474, 544, 554], "afc": 232, "affect": [488, 552], "affin": [142, 429, 546], "aforement": 538, "after": [1, 63, 133, 145, 171, 175, 180, 182, 187, 195, 209, 221, 225, 267, 281, 335, 385, 406, 413, 446, 462, 466, 471, 472, 476, 477, 480, 488, 494, 496, 525, 528, 533, 537, 538, 542, 543, 544, 546, 548, 549, 551, 552, 553, 554], "ag": 490, "again": [211, 548], "against": [228, 406], "aggress": 554, "agnost": [157, 478], "agnostic_mod": 230, "agre": 491, "ai": [470, 473, 474, 478, 494, 541, 545, 548], "aid": 524, "aim": [470, 482, 494, 531, 536, 544, 552, 554], "ajanthan": 544, "al": [135, 473, 477, 488, 494, 521, 534, 541, 549, 552], "albert": 555, "alemb": 529, "alexnet": 555, "algo": [156, 288, 289, 409, 442, 455], "algo_nam": [152, 305, 442, 522], "algorithm": [1, 31, 145, 152, 156, 195, 
226, 227, 234, 290, 299, 301, 305, 391, 436, 437, 439, 441, 442, 448, 455, 461, 477, 478, 481, 482, 484, 488, 494, 495, 496, 497, 522, 525, 530, 531, 533, 536, 544, 546], "algorithm_entri": [304, 440, 522], "algorithm_registri": 146, "algorithm_typ": 146, "algorithmschedul": 146, "algos_map": [391, 448], "alia": [463, 466], "alias": [466, 522], "alias_param": 466, "alibaba": [494, 545], "align": [225, 472, 490, 549, 553], "alignimagechannel": 553, "alignimagechanneltransform": 225, "alistarh": 544, "all": [1, 3, 21, 31, 48, 82, 140, 141, 145, 146, 148, 151, 152, 154, 157, 162, 165, 170, 175, 183, 192, 195, 200, 201, 207, 211, 218, 225, 227, 230, 232, 234, 236, 245, 274, 280, 282, 299, 302, 320, 353, 387, 389, 390, 391, 392, 396, 398, 413, 417, 420, 429, 438, 439, 441, 455, 459, 466, 472, 475, 478, 481, 482, 484, 488, 489, 490, 491, 494, 495, 496, 497, 522, 523, 526, 530, 534, 538, 543, 544, 545, 546, 548, 549, 552, 553, 554], "all_par": [413, 433], "allbalanc": 555, "allenai": [231, 232], "allevi": 552, "alloc": 483, "allow": [145, 152, 266, 433, 474, 476, 477, 482, 526, 537, 538, 539, 544, 549, 552, 554], "allowlist": [439, 472], "along": [425, 426, 431, 477, 534, 544, 549], "alpha": [126, 139, 142, 149, 169, 189, 286, 303, 413, 417, 429, 439, 488, 522, 546, 554], "alpha_list": 277, "alpha_max": [413, 439, 552], "alpha_min": [413, 439, 552], "alpha_step": [413, 439, 552], "alreadi": [140, 151, 211, 224, 396, 474, 478, 495, 529, 533, 554], "also": [138, 174, 208, 211, 225, 227, 234, 245, 262, 397, 472, 474, 477, 478, 479, 480, 488, 489, 491, 495, 496, 497, 523, 527, 531, 533, 537, 538, 542, 544, 545, 546, 548, 549, 552, 554], "altern": [140, 195, 262, 396], "although": [208, 551], "alwai": [153, 230, 234, 262, 472, 482, 537, 538, 549], "amax": [433, 473, 541], "amazon": 494, "amd": [494, 534, 555], "among": [234, 473, 488, 541, 552], "amount": [3, 413, 480, 552], "amp": [1, 418, 549], "amp_cfg": 1, "amp_convert": 1, "amx": [474, 545], "an": [1, 52, 53, 55, 90, 126, 128, 133, 135, 138, 140, 145, 150, 166, 180, 195, 203, 207, 211, 225, 226, 227, 231, 232, 235, 257, 261, 262, 281, 286, 324, 325, 327, 380, 385, 396, 397, 409, 417, 433, 448, 449, 466, 468, 472, 473, 474, 475, 476, 477, 478, 480, 481, 482, 488, 490, 492, 494, 495, 497, 521, 523, 524, 528, 536, 537, 538, 539, 541, 542, 544, 545, 546, 549, 551, 552, 553, 554], "anaconda": 534, "analysi": [473, 541, 544, 551], "analyt": [470, 534, 545], "analyz": [95, 173, 243, 360, 390, 551, 552], "andrew": 521, "ani": [135, 140, 156, 230, 232, 257, 261, 277, 281, 302, 392, 396, 422, 435, 442, 443, 452, 466, 478, 481, 490, 494, 497, 522, 526, 544, 551, 554], "anneal": 168, "anno_dir": 210, "anno_path": [234, 537], "annot": [210, 230, 281, 496, 497, 537, 544, 554], "anoth": [225, 227, 466, 553, 554], "answer": [225, 231, 232, 490, 544, 553, 555], "answer_start": 232, "answeringsquad": 555, "ao": 409, "ap": 537, "ap0": 555, "apach": [3, 535], "api": [55, 60, 95, 154, 230, 234, 262, 277, 278, 281, 290, 302, 303, 304, 305, 327, 332, 360, 389, 391, 392, 420, 436, 438, 439, 440, 442, 445, 466, 472, 475, 477, 482, 488, 492, 494, 498, 500, 506, 509, 512, 528, 529, 531, 533, 534, 538, 550, 551, 554, 556], "appear": [140, 396, 490], "append": [145, 266, 492, 525, 538, 543], "append_attr": 145, "appl": 554, "appli": [31, 98, 111, 112, 113, 116, 118, 122, 133, 142, 162, 184, 186, 228, 301, 305, 366, 367, 368, 371, 373, 377, 385, 392, 406, 413, 429, 437, 442, 448, 470, 473, 476, 480, 481, 482, 488, 490, 496, 497, 538, 541, 543, 544, 546, 548, 551, 552, 554], 
"applianc": 545, "applic": [221, 431, 477, 479, 488, 497, 528, 545, 549, 552, 553], "apply_awq_clip": 31, "apply_awq_scal": 31, "apply_inlin": [133, 385], "apply_single_pattern_pair": 406, "appoint": 490, "approach": [195, 476, 477, 488, 492, 521, 533, 538, 544, 545, 547, 549, 554], "appropri": [443, 476, 482, 484, 488, 489, 490, 521, 552], "approv": 491, "approx": [477, 488, 549], "approxim": [227, 228, 477, 496, 537, 549], "appu": 477, "apr": [494, 545], "apt": [529, 534], "ar": [52, 53, 90, 140, 145, 154, 175, 180, 181, 182, 187, 195, 209, 225, 230, 234, 263, 265, 266, 280, 281, 324, 325, 392, 396, 406, 409, 413, 417, 455, 465, 466, 471, 472, 473, 474, 476, 477, 478, 480, 481, 484, 488, 489, 490, 491, 492, 495, 496, 522, 523, 525, 526, 527, 528, 530, 532, 533, 535, 536, 537, 538, 539, 541, 542, 543, 544, 545, 546, 548, 549, 550, 551, 552, 553, 554], "arang": 552, "arbitrari": [140, 396, 478, 538, 543], "arc": 489, "arcfac": 555, "architectur": [151, 470, 474, 477, 484, 488, 489, 494, 520, 531, 538, 544, 545, 549], "arctic": 534, "are_shapes_equ": 90, "area": [225, 230, 473, 537, 541, 553], "arg": [1, 39, 90, 145, 154, 209, 210, 211, 266, 281, 311, 399, 420, 433, 437, 448, 462, 463, 473, 478, 526, 538, 541, 544, 546, 549, 552, 553], "argmax": 16, "argmaxoper": 6, "argpars": 154, "argu": 522, "argument": [140, 154, 195, 281, 396, 406, 413, 431, 438, 441, 442, 478, 481, 482, 549], "ariel": 544, "arithmet": 530, "arm": [494, 533, 534, 555], "around": 90, "arr": [30, 391, 466], "arrai": [3, 30, 31, 133, 140, 225, 230, 396, 444, 466, 553], "arrang": [181, 182, 187, 210, 211, 214, 544], "art": 544, "articl": [231, 232], "arxiv": [169, 187, 420, 439, 473, 477, 488, 494, 521, 541, 544, 545, 549, 552], "as_text": 90, "ascii": [140, 396], "asd932_": 211, "ask": [494, 534], "aspect": [221, 225, 553], "asplo": 545, "assert_error": 90, "assertionerror": [170, 176, 183, 192, 235, 435, 448], "assign": [230, 466, 526, 554], "assist": [159, 550], "associ": [133, 140, 230, 396, 406], "assum": [224, 230, 495, 530], "asterisk": [475, 552], "asym": [29, 30, 31, 142, 145, 413, 429, 433, 497, 530, 549], "asymmetr": [413, 481, 497, 546, 549, 554], "atenc": 483, "atom": 535, "att": [231, 232], "attach": [488, 538, 552], "attack": 490, "attempt": 554, "attent": [16, 143, 171, 173, 184, 195, 209, 417, 490, 544], "attention_ffn_nam": 184, "attention_mask": [209, 538], "attentionoper": 7, "attr": [90, 133, 173, 385], "attr1": 281, "attr2": 281, "attr5": 281, "attribut": [30, 68, 89, 90, 133, 145, 173, 184, 195, 340, 448, 466, 472, 495, 523, 530, 532, 538], "attribute1": 281, "attribute_to_kwarg": 30, "attributeerror": [138, 281, 397], "aug": [494, 545], "augment": 2, "author": 535, "auto": [145, 152, 153, 171, 173, 195, 264, 270, 302, 413, 438, 439, 443, 446, 466, 472, 475, 478, 485, 492, 494, 521, 531, 544, 546, 549], "auto_acceler": 447, "auto_alpha_arg": [303, 439, 552], "auto_clip": 452, "auto_config": 544, "auto_copi": 145, "auto_detect_acceler": 443, "auto_input_output": [133, 385], "auto_merg": 30, "auto_mixed_precis": 270, "auto_scal": 452, "auto_slim": 172, "autoalpha": 413, "autom": [545, 552], "automat": [171, 173, 174, 195, 211, 214, 221, 443, 448, 472, 478, 481, 483, 484, 492, 494, 520, 533, 538, 539, 543, 544, 546, 552, 553, 554], "automixedprecisiontunestrategi": 264, "automodelforcausallm": [141, 398, 489, 531], "automodelforsequenceclassif": 538, "autonumb": [496, 497], "autoround": [428, 437, 439, 478, 488, 489, 494, 536], "autoround_arg": 477, "autoround_quantize_entri": 437, "autoroundconfig": 
[437, 439, 452, 477, 489], "autoroundquant": 418, "autotoken": [489, 538], "autotrack": [125, 133, 283, 385], "autotun": [479, 480, 481, 488, 502, 522], "autotunestrategi": 263, "aux": 1, "auxiliari": 460, "avail": [154, 188, 195, 239, 413, 446, 474, 477, 478, 484, 494, 498, 500, 506, 509, 512, 522, 527, 529, 531, 533, 544, 554, 555], "averag": [31, 231, 232, 234, 477, 537, 538, 549, 554], "averagepool": 23, "averagepooling2d": 297, "avg": 546, "avgpool": [114, 123, 297, 369, 378, 530], "avoid": [90, 140, 145, 209, 396, 413, 433, 448, 483, 492, 522], "avx": 474, "avx512": [474, 488, 539, 546], "avx512_bf16": [474, 539], "avx512_core_amx_fp16": 474, "avx512_fp16": 474, "aw": [545, 555], "awai": 523, "awar": [31, 135, 162, 195, 269, 439, 449, 477, 478, 482, 495, 496, 497, 521, 525, 526, 528, 533, 543, 545, 549, 554], "awq": [31, 428, 433, 437, 439, 478, 488, 489, 547, 549], "awq_arg": [477, 549], "awq_g32asym": 549, "awq_quant": 31, "awq_quantize_entri": 437, "awqconfig": [437, 439, 452, 477, 489], "awqquant": 419, "ax": [195, 404, 459], "axi": [30, 195, 425, 426], "azur": [491, 494, 545], "b": [30, 59, 154, 209, 331, 418, 466, 477, 488, 491, 521, 537, 544, 549, 552, 554], "b1": [59, 331], "b16": [36, 308], "b3": 555, "b_dataload": [151, 195, 520, 538], "b_filter": 1, "b_func": [151, 520], "back": [140, 145, 154, 225, 396], "backbon": 551, "backend": [2, 28, 29, 149, 165, 195, 196, 197, 201, 202, 205, 209, 210, 212, 213, 214, 215, 216, 220, 222, 225, 235, 236, 237, 239, 272, 439, 452, 474, 477, 478, 492, 497, 526, 532, 533, 538, 539, 549, 553, 554], "backward": [449, 488, 525, 526, 538, 543, 544, 546, 550], "badri": 477, "baichuan": 536, "baichuan2": 536, "balanc": [217, 413, 473, 475, 477, 481, 488, 541, 549, 552], "ban": 490, "bandit": 491, "bandwidth": [474, 477, 488, 538, 539, 549], "bar": [30, 211, 545], "bare": [494, 529, 534], "bart": 555, "base": [1, 3, 21, 32, 71, 90, 101, 105, 107, 117, 129, 135, 138, 143, 145, 146, 152, 153, 154, 161, 162, 169, 176, 183, 189, 191, 195, 200, 203, 207, 209, 211, 218, 225, 234, 236, 243, 244, 245, 271, 274, 278, 289, 305, 343, 372, 381, 387, 390, 392, 397, 409, 413, 417, 429, 439, 442, 443, 446, 448, 471, 472, 474, 475, 477, 478, 482, 484, 488, 489, 495, 496, 497, 502, 522, 525, 526, 530, 538, 539, 544, 545, 546, 549, 551, 552, 554, 555], "base_algorithm": 394, "base_config": [153, 155, 299, 301, 302, 303, 305, 438, 439, 442], "base_dir": 30, "base_model": 237, "base_tun": [155, 302, 438, 479, 481], "basecallback": 162, "baseconfig": [152, 153, 156, 299, 301, 302, 305, 438, 439, 442, 478, 481, 522], "basedataload": [200, 204, 206, 387], "basedatalod": [200, 387], "baselin": [455, 482, 551, 552], "baseline_model": [481, 482], "basemetr": [234, 262], "basemodel": [236, 301, 302, 305, 390, 481], "basepattern": [175, 177, 179], "baseprun": [180, 185, 187, 188], "basereg": 189, "basetransform": 225, "bash": 489, "basi": 544, "basic": [173, 175, 183, 195, 224, 270, 274, 277, 449, 473, 533, 538, 541, 551], "basicprun": [186, 188], "basictoken": 224, "basictunestrategi": 265, "batch": [1, 145, 200, 202, 203, 207, 208, 387, 418, 449, 477, 523, 525, 538, 543, 544, 546, 552, 553, 554, 555], "batch_decod": 489, "batch_idx": 526, "batch_sampl": [200, 202, 204, 387, 523], "batch_siz": [195, 200, 202, 204, 207, 208, 209, 210, 387, 418, 439, 452, 459, 477, 523, 526, 528, 532, 538, 546], "batchmatmul": [113, 368], "batchmatmulv2": [113, 368], "batchnorm": [20, 47, 52, 106, 319, 324, 492, 552], "batchnormalizationoper": 20, "batchsampl": [207, 387], "batchtospacend": 
[43, 315], "bayesian": [195, 270, 544], "bayesianoptim": 266, "bayesiantunestrategi": 266, "bbox": [230, 492, 537], "bboxes_labels_scor": 492, "beam": [227, 537], "becaus": [140, 179, 184, 225, 396, 480, 488, 492, 546, 552, 553, 554], "becom": [477, 488, 495, 528, 544, 549], "been": [3, 140, 170, 176, 183, 227, 391, 396, 404, 412, 413, 417, 474, 477, 481, 488, 497, 522, 538, 539, 549, 552], "befor": [92, 94, 149, 175, 180, 182, 187, 188, 190, 195, 209, 288, 357, 359, 420, 431, 441, 446, 472, 477, 488, 489, 491, 497, 523, 525, 526, 529, 530, 538, 546, 549, 551, 554], "begin": [101, 181, 182, 187, 476, 522, 523, 525, 538, 544, 554], "behavior": [140, 396, 490, 495, 496, 497, 530, 540, 549, 554], "being": [135, 230, 497], "beit": 555, "belong": [211, 239, 546], "below": [40, 51, 59, 154, 195, 234, 262, 312, 323, 331, 470, 472, 473, 475, 477, 478, 481, 482, 488, 489, 491, 492, 494, 495, 496, 497, 523, 526, 531, 532, 537, 541, 542, 543, 544, 546, 549, 551, 552, 554], "benchmark": [155, 195, 226, 245, 466, 470, 489, 494, 501, 502, 531, 540, 554, 555], "benchmark_with_raw_cmd": 151, "benchmarkconf": 538, "benchmarkconfig": [151, 195, 520, 538], "benefici": 521, "benefit": [531, 543], "bert": [173, 195, 205, 208, 209, 225, 494, 537, 539, 544, 553, 555], "bert_dataset": 215, "bertattent": 173, "besid": [477, 488, 495, 549], "best": [162, 225, 271, 433, 477, 478, 479, 480, 482, 490, 522, 534, 542, 546, 549, 552, 553, 554], "best_clip_ratio": 433, "best_configur": 465, "best_model": [162, 465, 474, 479, 480, 481, 482, 549], "best_model_weight": 465, "best_scor": 162, "beta": [169, 425], "better": [81, 195, 198, 199, 235, 262, 352, 449, 474, 477, 488, 522, 525, 537, 539, 544, 545, 546, 549, 551, 552, 554], "between": [3, 29, 128, 150, 186, 195, 225, 231, 234, 257, 261, 380, 406, 413, 466, 472, 473, 477, 478, 481, 488, 492, 494, 495, 496, 497, 521, 533, 537, 538, 541, 546, 549, 550, 551, 552, 553, 554], "bf16": [31, 72, 134, 195, 278, 344, 399, 401, 406, 439, 472, 481, 495, 496, 530, 538, 548, 554, 555], "bf16_convert": [37, 136, 309], "bf16_op": [33, 35, 92, 306, 307, 357, 496, 548], "bf16convert": [35, 307, 548], "bf16modul": 134, "bf16modulewrapp": 134, "bf16wrapper": 548, "bfloat16": [30, 429, 439, 474, 494, 539, 548], "bfloat16fp16": 472, "bi": [231, 232], "bia": [128, 195, 380, 403, 423, 429, 549], "bias_constraint": [292, 293, 294, 298], "bias_correct": [130, 382], "bias_initi": [292, 293, 294, 298], "bias_regular": [292, 293, 294, 298], "biasadd": [38, 44, 49, 51, 94, 310, 316, 321, 323, 359, 530], "biascorrect": [128, 380], "bibtex": 535, "bicub": [225, 553], "big": [135, 477, 549], "bigscienc": [475, 536, 552], "bilibili": 545, "bilinear": [221, 225, 553], "bilinearimagenet": 553, "bilinearimagenettransform": 221, "bilingu": 227, "billion": 544, "bilstm": 555, "bin": [3, 140, 141, 391, 396, 398, 466, 529], "binari": [8, 133, 234, 385, 529, 537, 550], "binary_op": 16, "binarydirect8bitoper": 8, "binaryoper": 8, "bind": [151, 484, 489], "bio": 555, "bit": [29, 31, 145, 195, 280, 413, 421, 426, 429, 433, 439, 444, 452, 471, 472, 473, 474, 477, 478, 488, 489, 497, 521, 525, 539, 541, 545, 546, 549, 552], "bit_pack": 447, "bita": [473, 541], "bitnami": 545, "bitpack": 424, "bitwidth": 471, "bk3": 3, "black": 554, "black_nod": 2, "blendcnn": 555, "bleu": [228, 233, 537, 544], "bleu_hook": 228, "bleu_scor": 228, "bleu_token": 227, "bleu_util": 233, "blob": [3, 135, 177, 178, 227, 228, 231, 232, 234, 262, 538], "blob_siz": 31, "block": [31, 143, 145, 169, 179, 182, 187, 192, 245, 281, 417, 433, 473, 
477, 533, 541, 542, 544, 549, 554], "block_list": [145, 433], "block_mask": [169, 183], "block_num": [145, 433], "block_pattern": [143, 417], "block_prefix": [145, 433], "block_siz": [179, 404, 439, 477, 549], "blockfallbacktuningsampl": 277, "blocklist": [439, 472], "blockmaskcriterion": 169, "blocksiz": [31, 439, 452, 473, 541], "blockwis": 552, "blockwise_over_matmul_gemm_conv": 192, "blog": [439, 494, 545], "bloom": [475, 488, 494, 536, 544, 552], "bloomz": [475, 552], "blue": [477, 547], "bmm": [472, 477, 488, 549], "bn": 555, "bnb": [477, 549], "bnb_nf4": [439, 448], "bo": 195, "bodi": 490, "bool": [1, 29, 30, 31, 126, 133, 140, 144, 145, 153, 161, 175, 195, 209, 211, 221, 225, 228, 234, 277, 281, 286, 299, 303, 385, 396, 398, 406, 409, 412, 413, 418, 420, 423, 425, 426, 427, 431, 433, 439, 442, 446, 452, 459, 462, 465, 466, 477, 478, 523, 537, 553], "bool_val": 90, "boolean": [1, 409], "boost": [474, 494, 539, 544, 545], "booster": 525, "border": 553, "both": [133, 195, 385, 443, 476, 477, 479, 484, 488, 489, 490, 495, 525, 538, 544, 546, 549, 552, 554], "bottleneck": [477, 488, 549], "boudoukh": 544, "bound": [59, 151, 225, 230, 266, 331, 413, 537, 553], "boundari": [225, 553], "bowl": 232, "box": [225, 230, 234, 492, 537, 553, 554], "branch": [133, 385, 491], "brand": 535, "break": [94, 359, 526, 538], "breakthrough": [473, 541], "breviti": [227, 228, 537], "bridg": [495, 496, 533], "briefli": [488, 538, 552], "bright": 553, "bring": [488, 552], "broad": [470, 494, 533], "broadcast": [225, 526, 553], "broadcast_optimizer_st": 526, "broadcast_paramet": 526, "bronco": 232, "brought": [488, 540, 546], "buffer": [140, 396, 452], "bug": [491, 494], "build": [1, 29, 31, 100, 146, 147, 149, 150, 154, 238, 240, 242, 243, 244, 390, 413, 481, 489, 495, 496, 497, 534, 540, 545, 554], "build_captured_dataload": 413, "build_slave_faker_model": 280, "build_torch_model": 474, "built": [60, 165, 184, 196, 197, 201, 205, 209, 210, 215, 217, 219, 220, 221, 222, 225, 234, 237, 245, 262, 301, 305, 332, 449, 481, 482, 495, 526, 538, 542, 546, 551, 553, 554], "builtin": [140, 396, 526], "busi": 545, "button": [491, 533], "bypass_reshap": [52, 53, 324, 325], "byte": [140, 396, 413], "byte_arrai": [140, 396], "bytes_or_buff": 452, "bytesio": [140, 396], "c": [3, 59, 154, 211, 331, 466, 477, 483, 488, 529, 549, 554], "c1": [59, 331], "c1c2": [59, 331], "c2": [59, 331], "c6a": 555, "c6g": 555, "c6i": 555, "c_": [477, 549], "c_out": 477, "cach": [209, 477, 488, 494, 529, 546, 549], "cache_dir": [141, 398, 448], "cache_kl": 1, "cache_minmax": 1, "caffenet": 555, "cal_scal": 413, "calcul": [30, 45, 145, 153, 169, 177, 192, 195, 212, 213, 231, 232, 271, 317, 387, 413, 425, 453, 466, 472, 475, 477, 481, 488, 521, 537, 538, 542, 544, 546, 549, 551, 552, 554], "calculate_md5": 211, "calculate_ms": 466, "calculate_quant_min_max": 145, "calculate_scale_zp": 30, "calib": [145, 292, 293, 294, 297, 298, 496], "calib_cfg": 1, "calib_data": [1, 139], "calib_dataload": [262, 284, 301, 302, 305, 479, 480, 481, 482, 492, 523, 532, 537, 538, 546, 547, 548, 549], "calib_func": [33, 145, 262, 284, 301, 302, 305, 306, 433, 472, 481, 494, 548], "calib_iter": [284, 288, 301, 302, 305, 481, 496], "calib_method": 3, "calib_model": 1, "calib_num": 413, "calib_registri": 3, "calib_tensor": 1, "calibcollector": 1, "calibdata": 1, "calibr": [1, 4, 31, 34, 74, 75, 125, 145, 151, 195, 198, 199, 262, 285, 301, 305, 346, 412, 413, 418, 438, 442, 461, 470, 472, 476, 477, 478, 481, 488, 494, 495, 497, 532, 533, 544, 546, 549, 554], 
"calibration_data": [84, 92, 357], "calibration_sampling_s": [195, 538], "calibratorbas": 3, "call": [1, 29, 140, 146, 161, 173, 181, 182, 187, 230, 396, 413, 446, 481, 484, 488, 489, 492, 495, 523, 532, 544, 546, 552, 553], "call_count": 161, "call_on": 151, "callabl": [140, 153, 195, 232, 284, 301, 302, 305, 396, 412, 437, 438, 439, 442, 443, 448, 449, 478, 481, 522, 523], "callback": [449, 455, 492, 538, 543, 544, 546], "callbacks_list": 449, "camembert": 555, "can": [29, 30, 31, 128, 135, 140, 150, 173, 174, 175, 180, 188, 190, 195, 198, 199, 208, 211, 225, 230, 232, 234, 235, 245, 262, 380, 396, 413, 442, 449, 466, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 488, 489, 492, 494, 495, 496, 497, 520, 522, 523, 525, 526, 528, 530, 533, 534, 536, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 549, 551, 552, 553, 554, 555], "candid": 406, "cannot": [230, 477, 492, 522, 529, 549], "cap": 494, "cap_s8_1_11": 497, "cap_s8_1_11_conv1d": 497, "capabl": [1, 32, 278, 289, 472, 474, 477, 488, 489, 496, 497, 520, 530, 538, 539, 544, 546, 554], "capac": [477, 488, 549, 551], "captur": [391, 413, 466, 476, 477], "captureddataload": 413, "captureoutputtofil": [391, 466], "card": [477, 544], "carri": [412, 413, 544], "cascad": 534, "case": [30, 55, 59, 140, 151, 154, 195, 208, 224, 225, 245, 327, 331, 396, 441, 443, 470, 477, 478, 481, 488, 497, 522, 523, 525, 526, 530, 532, 537, 542, 544, 545, 546, 549, 550, 552, 553, 555], "cast": [30, 36, 308, 548, 553], "cast_tensor": 30, "castonnxtransform": 225, "castpytorchtransform": 225, "casttftransform": 225, "cat": [492, 528], "categor": 537, "categori": [211, 229, 520], "category_id_set": 230, "caus": [192, 477, 488, 496, 523, 544, 549], "causal": [431, 441, 544], "cce5ff": 554, "cd": [526, 534], "cdot": [488, 552], "ce": [163, 195, 538], "center": [225, 493, 494, 534, 553, 554], "centercrop": 553, "centercroptftransform": 225, "centercroptransform": 225, "cento": 534, "central_fract": [221, 553], "cern": 545, "certain": [140, 194, 396, 482, 551], "certif": 491, "cfg": [56, 57, 145, 278, 328, 329, 412, 413, 417, 455, 496, 497], "cfg_filter": 455, "cfg_from_fil": 466, "cfg_preprocess": 455, "cfg_to_qconfig": [413, 417], "challeng": [473, 477, 541, 547], "chang": [29, 81, 128, 150, 181, 182, 184, 185, 187, 190, 195, 269, 352, 380, 413, 442, 466, 478, 491, 496, 523, 526, 529, 535, 538, 544, 553, 554], "channel": [29, 30, 31, 128, 149, 150, 171, 174, 179, 195, 221, 225, 380, 413, 433, 462, 472, 473, 475, 477, 494, 495, 497, 533, 534, 541, 544, 549, 553, 555], "channel_axi": [98, 147, 150], "channels_last": 221, "channelx1": [195, 544], "chapter": 496, "characterist": 490, "chart": [488, 546, 551], "chat": [475, 484, 489, 536, 552], "chatbot": [473, 541, 545], "chatglm2": 536, "chatglm3": 536, "check": [1, 31, 52, 53, 90, 133, 145, 192, 201, 211, 324, 325, 385, 391, 406, 413, 417, 427, 442, 446, 457, 465, 466, 481, 483, 494, 526, 534, 550], "check_cfg_and_qconfig": [145, 413, 417], "check_config": 192, "check_dataload": 201, "check_integr": 211, "check_key_exist": 466, "check_key_valid": 192, "check_model": 457, "check_mx_vers": 1, "checknumer": [65, 337], "checkout": 491, "checkpoint": [140, 243, 390, 396, 431, 441, 465, 540], "checkpoint_dir": [431, 441, 465], "checkpoint_sess": [243, 390], "checksum": 211, "chees": 522, "cheeseshopaddress": 522, "chen": 535, "cheng": [477, 488], "child": [59, 141, 331, 398, 420], "children": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 21, 22, 23, 25, 26, 141, 398], "chines": 545, "choic": [209, 
477, 481, 488, 495, 537, 552], "choos": [211, 433, 462, 478, 488, 533, 545, 550, 552, 554], "chosen": [472, 496, 521], "chunk": [225, 553], "chunk_siz": 211, "ci": 491, "cifar": [211, 555], "cifar10": 211, "cifar100": 211, "circumst": 490, "ckpt": [133, 235, 243, 262, 385, 390, 555], "cl": [141, 146, 161, 163, 165, 175, 180, 188, 189, 190, 211, 218, 225, 234, 245, 274, 391, 398, 448, 466, 522], "claim": 535, "clamp_": [488, 552], "clarifi": 490, "class": [101, 138, 145, 170, 176, 183, 215, 235, 262, 285, 287, 291, 305, 397, 400, 448, 477, 478, 479, 481, 482, 492, 496, 522, 523, 537, 538, 542, 543, 546, 550, 554], "class_1": 211, "class_n": 211, "classdef": 554, "classic": [478, 554], "classif": [188, 209, 225, 234, 528, 537, 544, 545], "classifi": [173, 192, 209, 234], "classificationmnli": 555, "classificationmrpc": 555, "classificationqnli": 555, "classificationqqp": 555, "classificationsst": 555, "classifierheadsearch": 173, "classifierheadsearchertf": 173, "classregist": 280, "clean": [224, 398, 538], "clean_module_weight": 398, "clean_weight": 398, "clear": [207, 495, 530, 537, 538], "click": [494, 533, 534, 545], "client": [161, 494], "clip": [31, 413, 433, 477, 488, 549, 552], "clip_grad_norm_": 538, "clm": 544, "clone": [491, 534], "close": [128, 150, 380, 521], "cloud": [473, 491, 494, 541, 545], "cluster": 554, "cmd": [151, 534], "cnn": 555, "cnt": 538, "co": 544, "coarsest": [488, 552], "coco": [210, 217, 230, 234, 537, 544], "coco_dataset": 215, "coco_filt": 219, "coco_label_map": 233, "coco_tool": 233, "cocoev": 230, "cocoevalwrapp": 230, "cocomap": 537, "cocomapv2": [234, 537], "coconpi": 210, "cocoraw": 210, "cocorecorddataset": 210, "cocowrapp": 230, "code": [140, 173, 198, 199, 262, 396, 439, 449, 470, 474, 477, 479, 489, 492, 494, 495, 496, 497, 525, 526, 532, 533, 535, 537, 542, 543, 544, 545, 546, 551, 552, 553], "codebert": 555, "codec": [140, 396], "codenam": [474, 536, 539], "coder": [533, 545], "coeff": 189, "coeffici": [40, 189, 312, 544], "cola": [209, 537, 555], "collabor": [491, 494], "collat": [133, 145, 385], "collate_fn": [200, 202, 203, 204, 387, 523], "collate_pr": 30, "collate_result": 145, "collate_tf_pr": [133, 385], "collate_torch_pr": 145, "collctor": 3, "collect": [1, 3, 30, 145, 153, 154, 223, 225, 232, 234, 391, 403, 412, 413, 416, 419, 421, 430, 453, 466, 481, 488, 496, 546, 554], "collect_layer_histogram": 460, "collect_layer_input": 192, "collect_weight_info": 145, "collector": [1, 135, 453], "collectorbas": 1, "collecttransform": 225, "color": [477, 544, 547], "colorjitt": 553, "column": [466, 477, 488, 549, 552], "column_map": 466, "columnwis": [50, 322], "com": [3, 135, 177, 178, 188, 209, 227, 228, 231, 232, 234, 262, 439, 490, 522, 534, 535, 538, 550, 551, 555], "comb": 525, "combin": [1, 145, 195, 198, 199, 221, 235, 262, 391, 417, 449, 466, 477, 480, 492, 494, 495, 533, 538, 543, 545, 546, 548, 549, 553, 554], "combine_cap": 1, "combine_histogram": [391, 466], "come": [140, 396, 473, 536, 541, 554], "comma": 418, "command": [151, 154, 449, 483, 484, 526], "command_prefix": 154, "comment": [138, 397, 477, 483, 490, 496, 537, 538, 549, 553], "commentsbalancedor": 555, "commit": [478, 481, 490, 491], "common": [124, 138, 140, 195, 223, 226, 299, 301, 302, 303, 305, 379, 396, 397, 437, 438, 439, 442, 448, 455, 475, 479, 480, 481, 490, 522, 525, 526, 528, 531, 532, 534, 538, 546, 552], "commonli": 544, "commun": 490, "comoress": 462, "compact": [538, 544], "compar": [135, 234, 466, 473, 476, 477, 482, 488, 525, 537, 541, 544, 546, 549, 551, 
554], "compare_kei": 466, "compare_label": [234, 262, 537], "compare_object": 466, "compare_weight": 135, "comparison": [466, 549, 550], "compat": [133, 243, 385, 390, 491, 526, 540, 544, 550], "compatible_format": [138, 397], "compil": [227, 471, 476, 489, 538], "complaint": 490, "complement": [474, 544], "complet": [278, 472, 495, 496, 544, 546, 554, 555], "complex": [156, 474, 522, 538, 539, 544], "complex_attr": 156, "complextfmap": 522, "compli": 554, "compon": [162, 284, 482, 492, 522, 526, 534, 535, 543, 544], "compos": [152, 225, 455, 553], "composableconfig": [152, 522], "composetransform": 225, "comprehens": [470, 478, 481, 538], "compress": [226, 245, 420, 439, 449, 462, 468, 470, 472, 475, 477, 478, 480, 482, 488, 489, 494, 501, 531, 533, 538, 542, 543, 544, 545, 546, 552, 555], "compress_bit": 444, "compressed_model": 549, "compression_dim": [429, 462, 549], "compression_dtyp": [429, 462, 549], "compression_manag": [195, 449, 492, 525, 538, 543, 544, 546], "compressionmanag": 449, "compressor": [1, 151, 154, 162, 165, 177, 178, 188, 195, 221, 222, 225, 226, 233, 234, 235, 239, 246, 262, 270, 276, 290, 302, 303, 304, 305, 389, 392, 394, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 450, 452, 456, 468, 470, 472, 473, 474, 475, 476, 477, 478, 479, 481, 482, 483, 485, 488, 489, 491, 495, 496, 497, 520, 521, 522, 524, 525, 526, 527, 528, 529, 530, 531, 532, 535, 536, 539, 540, 541, 542, 543, 544, 545, 546, 549, 550, 552, 553, 554, 555], "comput": [30, 31, 90, 135, 145, 227, 228, 231, 232, 234, 413, 417, 425, 449, 466, 472, 473, 474, 476, 477, 488, 521, 523, 525, 528, 537, 538, 539, 541, 544, 545, 546, 549, 552, 554], "compute_bleu": 228, "compute_const_folding_using_tf": 90, "compute_dtyp": 452, "compute_error": 135, "compute_spars": 466, "computemetr": 230, "concat": [16, 132, 384], "concat_gener": 225, "concaten": 413, "concatoper": 9, "concatv2": [109, 119, 132, 364, 374, 384, 530], "concept": [470, 532, 551], "concret": [133, 385, 390, 554], "conda": [529, 550], "condit": [70, 153, 209, 210, 211, 225, 263, 265, 342, 482, 535, 553, 554], "conduct": [470, 492, 538, 554], "conf": [146, 151, 162, 195, 198, 199, 235, 245, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 278, 281, 449, 492, 497, 520, 532, 538, 539, 540, 543, 546, 547, 548, 549, 551, 552, 554], "confer": [135, 488, 544, 552], "confid": 145, "confidence_batch": [145, 554], "confidenti": 490, "config": [1, 31, 100, 101, 103, 107, 145, 151, 152, 153, 156, 160, 161, 169, 170, 171, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 198, 199, 209, 226, 235, 257, 261, 278, 279, 280, 281, 284, 288, 289, 300, 301, 302, 304, 305, 389, 406, 409, 412, 413, 417, 424, 427, 431, 438, 440, 442, 448, 449, 452, 455, 465, 466, 471, 472, 474, 476, 477, 478, 479, 496, 497, 501, 502, 520, 522, 523, 525, 526, 528, 533, 537, 538, 539, 540, 543, 544, 546, 548, 549, 551, 554], "config1": 153, "config2": 153, "config_file_path": 188, "config_inst": 151, "config_list": [152, 153], "config_map": [160, 427], "config_name_map": 160, "config_quantizable_lay": 103, "config_set": [153, 474, 479, 480, 481, 482], "config_sourc": 153, "configload": 153, "configmappingtyp": 427, "configproto": 261, "configregistri": [152, 160], "configs_map": [305, 399, 437, 442], "configset": 153, "configur": [1, 103, 134, 145, 151, 152, 153, 154, 161, 192, 195, 198, 199, 201, 211, 214, 234, 235, 245, 262, 271, 280, 301, 305, 406, 408, 409, 411, 413, 417, 422, 427, 437, 439, 442, 448, 449, 454, 455, 459, 464, 
465, 478, 482, 484, 489, 492, 495, 496, 538, 542, 544, 546, 549, 550, 552, 553, 554, 555], "confirm": 496, "conflict": [90, 522, 529], "connect": [169, 544, 551], "consecut": [171, 173, 174, 544], "conserv": [195, 270], "conservativetunestrategi": 267, "consid": [31, 133, 234, 385, 490, 521, 522, 554], "consider": [480, 552], "consist": [41, 48, 313, 320, 477, 491, 492, 522, 552, 554], "consolid": 534, "const": [41, 42, 48, 49, 52, 53, 55, 67, 83, 313, 314, 320, 321, 324, 325, 327, 339, 354, 439], "const_node_valu": 90, "constant": [90, 158, 266, 276, 388, 447, 459, 460, 522, 523, 530, 553], "constant_valu": 225, "constfold": 530, "constrain": 547, "constraint": [195, 538, 544, 551, 554], "construct": [140, 209, 212, 213, 216, 223, 234, 239, 243, 280, 387, 389, 390, 396, 455, 482, 490, 496, 497, 526, 544, 554], "construct_function_from_graph_def": [133, 385], "consum": [482, 523], "consumpt": [473, 477, 488, 541, 552], "contact": [490, 550, 551], "contain": [1, 52, 53, 55, 59, 124, 133, 135, 140, 145, 151, 162, 170, 173, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 192, 195, 198, 199, 209, 210, 211, 230, 231, 232, 235, 262, 324, 325, 327, 331, 379, 385, 396, 406, 409, 412, 413, 417, 425, 429, 449, 455, 459, 465, 470, 477, 481, 494, 521, 529, 533, 537, 544, 549, 554], "content": 489, "content_fold": 216, "context": [1, 466, 477, 488, 549], "contextu": 544, "contigu": 145, "continu": [178, 230, 477, 536, 538, 544], "contract": [43, 315], "contrast": [175, 553], "contrib": [3, 226], "contribut": [490, 494, 544], "control": [146, 195, 492, 538, 544, 554], "conv": [16, 43, 47, 51, 56, 57, 58, 77, 94, 149, 195, 315, 319, 323, 328, 329, 330, 348, 359, 492, 496, 497, 538, 544, 546, 554], "conv1": [195, 479, 546], "conv1d": [194, 478, 497], "conv1d_relu": 528, "conv2d": [38, 44, 45, 46, 50, 51, 56, 110, 120, 128, 294, 295, 298, 303, 310, 316, 317, 318, 322, 323, 328, 365, 375, 380, 413, 472, 496, 497, 528, 530, 552, 554], "conv2d_config": 479, "conv2d_relu": 528, "conv2dbackpropinput": [111, 366], "conv3d": [56, 110, 328, 365], "conv3dbackpropinputv2": [111, 366], "conveni": [478, 538], "convent": [211, 230, 473, 491, 541], "convers": [39, 40, 195, 311, 312, 406, 474, 475, 488, 489, 491, 495, 496, 538, 539, 546, 548, 552], "convert": [1, 30, 33, 34, 35, 38, 39, 40, 41, 42, 51, 59, 71, 84, 85, 87, 88, 89, 90, 116, 121, 125, 127, 134, 145, 173, 195, 209, 221, 224, 225, 230, 278, 281, 283, 288, 289, 306, 307, 310, 311, 312, 313, 314, 323, 331, 343, 355, 371, 376, 392, 399, 406, 437, 442, 448, 450, 458, 471, 472, 473, 474, 475, 476, 477, 478, 480, 481, 484, 488, 492, 494, 496, 531, 538, 539, 546, 548, 549, 552, 553, 554], "convert_add_to_biasadd": [61, 333], "convert_bf16": 496, "convert_by_vocab": 224, "convert_examples_to_featur": [209, 225], "convert_layout": [61, 333], "convert_leakyrelu": [61, 333], "convert_nan_to_random": [61, 333], "convert_placeholder_to_const": [61, 333], "convert_tensorflow_tensor_to_onnx": 90, "convert_to_unicod": 224, "convertaddtobiasaddoptim": [38, 310], "converted_model": [195, 235, 538, 539], "convertlayoutoptim": [39, 311], "convertleakyreluoptim": [40, 312], "convertnantorandom": [41, 313], "convertplaceholdertoconst": [42, 314], "convolut": 545, "convoper": 10, "cooper": [474, 534, 539, 545], "coordin": [225, 266, 553, 554], "copi": [195, 466, 523], "copyreg": [138, 397], "copyright": [491, 535], "core": [151, 154, 195, 285, 407, 424, 477, 483, 484, 489, 491, 494, 520, 534, 535, 555], "core_id": 151, "core_list": [151, 154], 
"core_list_per_inst": 154, "cores_per_inst": [151, 195, 520, 538], "corner": [225, 491, 495, 497, 553], "corpor": 535, "correct": [128, 150, 192, 195, 230, 231, 232, 234, 380, 490], "correspond": [3, 135, 173, 184, 195, 209, 227, 230, 234, 412, 413, 417, 427, 455, 466, 472, 478, 488, 495, 530, 538, 544, 546, 551, 554], "cost": [472, 473, 477, 488, 541, 546, 549], "could": [140, 149, 175, 198, 199, 211, 235, 262, 396, 413, 449, 470, 474, 476, 477, 479, 481, 488, 490, 492, 525, 529, 538, 543, 544, 546, 549, 552], "count": [1, 477, 488, 549], "counter": 90, "coupl": 208, "cover": [491, 528, 532, 545], "coverag": 491, "cowork": [431, 441], "cpu": [76, 77, 78, 79, 139, 140, 145, 154, 161, 195, 347, 348, 349, 350, 391, 396, 398, 401, 413, 420, 429, 431, 433, 441, 443, 462, 466, 474, 476, 478, 481, 483, 496, 520, 521, 533, 538, 539, 545, 546, 547, 548, 549], "cpu_acceler": 443, "cpu_execution_tim": 251, "cpu_index": 154, "cpu_rang": 154, "cpuexecutionprovid": [2, 28, 29, 31, 539, 546], "cpufreq": 522, "cpuinfo": [161, 391, 466], "craft": [473, 541], "crbug": 522, "creat": [1, 3, 90, 138, 156, 195, 200, 209, 211, 230, 257, 261, 278, 280, 281, 387, 397, 406, 409, 452, 455, 470, 472, 488, 490, 495, 540, 544, 546, 551, 554], "create_data_exampl": 1, "create_dataload": 455, "create_dataset": 455, "create_eval_func": 455, "create_obj_from_config": 460, "create_onnx_config": 257, "create_quant_spec_from_config": 409, "create_tf_config": 261, "create_train_func": 455, "create_xiq_quantizer_from_pt2e_config": 409, "criteria": [170, 195, 482, 492, 552], "criterion": [135, 162, 164, 165, 169, 175, 180, 181, 182, 184, 187, 191, 195, 525, 538, 544, 546, 552, 554], "criterion_class": [169, 191], "criterion_conf": 195, "criterion_registri": 163, "criterion_typ": 163, "critet": 163, "critic": [184, 420, 481, 490], "crop": [221, 225, 553], "crop_pad": 225, "crop_ratio": 216, "cropres": 553, "cropresizetftransform": 225, "cropresizetransform": 225, "croptoboundingbox": [225, 553], "cross": [165, 234, 483, 532, 554], "cross_memori": 483, "crossentropyloss": [163, 195, 538], "crowd": 230, "crucial": [476, 544], "cs412": 3, "cse": [59, 83, 331, 354], "csv": 466, "ctx": 1, "cube": [195, 544], "cuda": [140, 192, 396, 413, 425, 443, 478, 539, 544, 546], "cuda_acceler": 443, "cudaexecutionprovid": [539, 546], "current": [1, 55, 89, 154, 169, 170, 173, 176, 180, 183, 195, 266, 272, 327, 413, 433, 441, 448, 466, 476, 477, 478, 482, 492, 494, 495, 497, 521, 522, 526, 530, 531, 538, 542, 544, 546, 549, 552, 554], "current_pattern": 173, "current_sparsity_ratio": 180, "curv": 537, "custom": [100, 138, 145, 151, 234, 245, 292, 293, 294, 295, 297, 298, 397, 433, 465, 472, 478, 482, 488, 494, 520, 525, 544, 545, 549, 551], "custom_metr": 532, "custom_tune_config": [474, 479, 480, 481, 482], "customis": [523, 537], "customized_msg": [161, 391, 466], "cv": [195, 472, 476, 478, 481, 544], "cv2": 221, "cvf": [488, 552], "d": [59, 145, 331, 417, 532, 544], "d1": [59, 331, 466], "d18": 555, "d2": 466, "d_": 195, "d_conf": [195, 525, 538, 543], "dai": 554, "damp_perc": 452, "dampen": 31, "darvish": [473, 541], "data": [1, 29, 30, 31, 41, 90, 125, 133, 135, 140, 145, 149, 161, 173, 195, 198, 199, 226, 235, 256, 260, 262, 266, 278, 280, 283, 301, 305, 313, 385, 388, 391, 396, 403, 404, 409, 413, 418, 433, 449, 452, 455, 459, 466, 470, 471, 472, 473, 474, 476, 477, 478, 481, 488, 494, 495, 521, 523, 526, 530, 534, 537, 538, 539, 541, 546, 548, 549, 550, 551, 552, 553, 554], "data_dir": 209, "data_format": [211, 221, 292, 294, 297, 298], 
"data_it": [1, 173], "data_load": [33, 34, 306, 495], "data_path": 214, "data_sourc": [207, 455], "data_typ": [280, 418], "data_x": 1, "databas": 211, "databrick": [475, 536, 552], "datafunc": 211, "dataiterload": 1, "dataload": [1, 2, 29, 31, 125, 126, 135, 145, 151, 170, 171, 173, 188, 192, 198, 199, 209, 220, 256, 260, 262, 283, 286, 387, 413, 418, 420, 433, 449, 455, 470, 481, 492, 495, 496, 525, 526, 532, 538, 539, 543, 544, 546, 547, 548, 549, 552, 553], "dataloader_cfg": 455, "dataloaderwrap": 1, "datalod": 413, "dataset": [125, 126, 145, 188, 198, 199, 200, 202, 203, 204, 206, 207, 208, 220, 230, 231, 232, 234, 235, 262, 283, 286, 387, 418, 449, 452, 455, 475, 477, 479, 480, 481, 488, 495, 496, 523, 526, 537, 538, 544, 546, 549, 550, 552, 554, 555], "dataset_format": 211, "dataset_nam": 418, "dataset_registri": 211, "dataset_typ": 211, "datatyp": [90, 145, 548, 554, 555], "date": [477, 549], "datetim": 195, "dbox": 492, "dco": 491, "ddr5": 555, "deal": 449, "deberta": 555, "debug": [281, 463, 554], "debug_stripp": 530, "dec": 545, "decid": [101, 195, 228, 481, 483, 495, 496, 538, 546, 550, 554], "decim": 466, "decis": [522, 537], "declar": 522, "decod": [140, 221, 227, 396, 452, 537, 553], "decode_singl": 492, "decodeimag": 553, "decompos": [52, 53, 324, 325], "decor": [3, 21, 52, 53, 55, 133, 146, 152, 161, 165, 175, 180, 188, 190, 225, 234, 245, 274, 324, 325, 327, 391, 413, 446, 448, 466, 492, 495, 554], "decorator_metr": 234, "decreas": [83, 354, 544], "dedic": 195, "dedicated_qdq_pair": [28, 195, 546], "deem": 490, "deep": [391, 466, 470, 473, 474, 478, 481, 488, 494, 495, 496, 523, 533, 534, 538, 539, 541, 545, 546, 554], "deep_get": [245, 391, 466], "deep_set": [245, 466], "deepcopi": 195, "deepen": 545, "deepst": 195, "def": [52, 53, 55, 153, 198, 199, 262, 281, 324, 325, 327, 391, 448, 449, 474, 475, 478, 479, 481, 482, 488, 492, 495, 522, 523, 525, 526, 537, 538, 546, 552, 554], "default": [3, 31, 82, 133, 140, 145, 152, 156, 161, 192, 195, 198, 199, 202, 209, 210, 211, 221, 225, 227, 234, 243, 280, 299, 303, 353, 385, 390, 391, 396, 398, 408, 409, 412, 413, 418, 425, 431, 433, 435, 437, 438, 439, 441, 442, 448, 452, 455, 458, 459, 462, 466, 472, 473, 474, 475, 477, 478, 481, 482, 483, 484, 489, 492, 495, 522, 523, 530, 533, 537, 538, 539, 541, 542, 544, 546, 548, 549, 551, 552, 553, 554], "default_alpha": 552, "default_col": [202, 387], "default_config": 192, "default_dtyp": 278, "default_opset_vers": 127, "default_sampl": [153, 482], "default_sq_alpha_arg": 303, "default_v": 156, "default_white_list": [152, 299, 303, 439], "default_workspac": 195, "defaultdataload": [202, 208], "defin": [71, 135, 156, 173, 175, 180, 181, 182, 187, 188, 189, 190, 195, 198, 199, 235, 262, 343, 449, 452, 465, 472, 479, 481, 482, 488, 490, 494, 495, 496, 523, 525, 530, 533, 537, 538, 543, 544, 546, 549, 551, 552, 554], "definit": [133, 142, 203, 207, 385, 429], "defult": [431, 441], "degrad": [525, 544, 554], "delet": 544, "delete_assign": 261, "deliv": [474, 477, 538, 545, 550], "demand": [477, 488, 549], "demo": [154, 478, 538], "democrat": 545, "demonstr": [473, 477, 482, 484, 494, 527, 541], "denot": [488, 552], "dens": [173, 190, 195, 295, 544, 555], "dense_shap": 213, "densenet": 555, "densiti": 554, "denver": 232, "depend": [1, 198, 199, 235, 262, 449, 491, 494, 529, 530, 534, 546, 554], "deploi": [477, 482, 488, 491, 538, 549, 554], "deploy": [478, 524, 545, 547, 552], "deprec": [153, 522, 550, 553], "depth": [145, 417, 548], "depth_multipli": [294, 298], "depthwis": [294, 413], 
"depthwise_constraint": [294, 298], "depthwise_conv2d": 295, "depthwise_initi": [294, 298], "depthwise_regular": [294, 298], "depthwiseconv2d": 294, "depthwiseconv2dn": [50, 56, 110, 120, 322, 328, 365, 375, 530], "dequant": [29, 30, 31, 36, 73, 76, 77, 78, 79, 81, 135, 145, 308, 345, 347, 348, 349, 350, 352, 398, 413, 423, 429, 433, 466, 488, 492, 496, 552], "dequantize_cast_optim": [37, 309], "dequantize_data": 30, "dequantize_data_with_scale_zero": 30, "dequantize_per_channel": [488, 552], "dequantize_tensor": 466, "dequantize_weight": 466, "dequantizecastoptim": [36, 308], "dequantizelinear": 30, "deriv": [162, 169, 177, 178, 179, 185, 186, 187, 188, 189, 190, 191], "derogatori": 490, "desc": 538, "desc_act": 452, "descent": [173, 439, 477, 488, 494, 545], "describ": [495, 497, 522, 530], "descript": [1, 117, 281, 372, 472, 475, 492, 496, 497, 523, 552], "deseri": [140, 396], "design": [162, 208, 234, 267, 470, 476, 478, 481, 489, 494, 538, 544, 546], "desir": [195, 221, 225, 409, 475, 496, 497, 544, 552, 553], "despit": 482, "dest": 90, "destin": [133, 385, 457], "detach": [488, 552], "detail": [29, 149, 152, 173, 225, 413, 439, 466, 467, 470, 474, 475, 477, 478, 479, 480, 481, 488, 489, 490, 494, 496, 497, 521, 522, 530, 532, 536, 537, 538, 539, 544, 549, 550, 551, 552, 554], "detect": [143, 145, 161, 173, 195, 230, 234, 239, 417, 443, 448, 478, 491, 492, 544, 554], "detect_processor_type_based_on_hw": 161, "detection_box": [230, 538], "detection_boxes_list": 230, "detection_class": [230, 538], "detection_classes_list": 230, "detection_mask": 230, "detection_scor": [230, 538], "detection_scores_list": 230, "detection_typ": 230, "detectionboxes_precis": 234, "detectioncoco": 555, "detections_list": 230, "detector": 143, "determin": [169, 175, 180, 191, 195, 427, 461, 471, 476, 482, 484, 490, 492, 521, 544, 546], "determinist": 538, "dettmer": [477, 488, 549], "dev": 529, "develop": [135, 156, 245, 473, 474, 491, 492, 522, 534, 538, 539, 541, 544, 545, 548, 551], "deviat": [225, 553], "devic": [1, 64, 74, 75, 76, 77, 78, 79, 82, 92, 116, 121, 132, 139, 140, 145, 170, 173, 174, 188, 192, 195, 336, 346, 347, 348, 349, 350, 353, 357, 371, 376, 384, 396, 398, 401, 413, 417, 418, 420, 423, 425, 429, 431, 433, 441, 446, 448, 462, 474, 477, 478, 484, 488, 494, 496, 523, 538, 539, 544, 547, 549], "device_id": [140, 396], "device_map": 489, "device_nam": [443, 446], "device_synchron": 446, "devop": 491, "df": 90, "diag": 31, "diagnosi": 545, "diagon": [31, 477, 549], "diagram": [477, 496, 497, 554], "dict": [1, 31, 101, 133, 134, 135, 140, 145, 146, 151, 154, 160, 165, 169, 170, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 208, 229, 230, 231, 232, 234, 235, 243, 262, 277, 280, 281, 289, 299, 303, 305, 385, 390, 391, 396, 399, 404, 406, 411, 412, 413, 417, 418, 419, 420, 425, 427, 433, 435, 437, 438, 439, 442, 448, 449, 452, 453, 455, 457, 459, 465, 466, 477, 478, 479, 484, 489, 492, 496, 522, 528, 537, 544, 546, 552, 554], "dictionari": [52, 53, 55, 133, 134, 135, 140, 145, 194, 195, 224, 230, 324, 325, 327, 391, 396, 404, 413, 417, 431, 441, 448, 466], "differ": [128, 146, 150, 152, 154, 156, 157, 163, 184, 195, 203, 211, 225, 234, 243, 245, 380, 390, 445, 461, 472, 477, 478, 481, 484, 488, 490, 492, 496, 523, 526, 527, 531, 532, 533, 537, 538, 540, 542, 544, 546, 549, 552, 553, 554], "difficult": [475, 480, 482, 488, 495, 552], "difficulti": [475, 480, 488, 552], "diffus": [494, 545], "digit": [227, 466, 545], "dilat": 
[43, 315], "dilated_contract": [61, 333], "dilatedcontract": [43, 315], "dilation_r": [292, 294, 298], "dim": [225, 488, 552, 553], "dimens": [52, 53, 90, 202, 225, 324, 325, 387, 413, 477, 549, 553], "dir": [145, 209, 462, 465, 529, 532], "direct": [85, 179, 355, 478, 544], "direct8bit": 11, "direct8bitoper": 11, "direct_q8": 16, "directli": [171, 195, 208, 230, 234, 479, 489, 537, 538, 544, 546], "directori": [195, 210, 211, 243, 390, 408, 431, 441, 462, 465, 466, 494, 526, 529], "disabl": [133, 195, 391, 490, 548, 549, 552], "disable_al": 195, "disable_quanted_input": 452, "disable_random": [133, 391], "discard": 523, "discord": 494, "discourag": 544, "discov": 521, "discret": [3, 554], "discrimin": 555, "discuss": [478, 494], "dispatch": [496, 533], "displai": [195, 277, 278, 391, 466], "distanc": 466, "distil": [162, 195, 470, 494, 527, 531, 533, 543], "distil_loss": [195, 538], "distilbert": [209, 539, 545, 555], "distilgpt2": 555, "distillation_conf": 162, "distillation_criterion": [525, 538, 543], "distillationcallback": 162, "distillationconfig": [195, 449, 525, 538, 543], "distilroberta": 555, "distinct": [473, 541], "distort": 496, "distribut": [1, 3, 128, 150, 195, 200, 202, 203, 204, 207, 380, 387, 461, 470, 472, 477, 481, 488, 496, 523, 534, 545, 546, 549, 550, 552, 555], "distribute_calib_tensor": 1, "distributedoptim": 526, "distributedsampl": 526, "distutil": 529, "div_": [488, 552], "dive": [470, 530], "diverg": [3, 453, 461, 497, 521, 554], "divid": [488, 523, 552, 554], "divis": [31, 413], "dl": [235, 474, 532, 545, 555], "dlabel": 492, "dlrm": 234, "dmlexecutionprovid": 546, "dnnlexecutionprovid": [539, 546], "do": [162, 188, 195, 208, 209, 227, 230, 411, 459, 465, 472, 478, 490, 496, 522, 526, 530, 533, 537, 538, 544, 548, 550, 552, 554], "do_blockwis": [413, 439, 552], "do_constant_fold": 459, "do_lower_cas": [209, 224, 225, 553], "do_sampl": 489, "doc": [177, 178, 195, 234, 262, 277, 278, 281, 391, 443, 466, 494, 534, 537, 538], "doc_span_index": 225, "doc_strid": [225, 553], "doc_token": 225, "docker": 529, "docstr": [195, 281], "docstyl": 491, "document": [225, 281, 470, 474, 479, 480, 488, 489, 496, 497, 501, 526, 536, 539, 544, 550, 553], "doe": [227, 280, 301, 305, 476, 477, 481, 522, 523, 537, 538, 549, 550, 553], "doesn": [140, 225, 396, 474, 481, 495, 526, 538, 539], "dolli": [475, 536, 544, 552], "domain": [89, 90, 195, 544, 554], "don": [128, 150, 175, 195, 380, 475, 476, 492, 544, 554], "done": [488, 492, 525, 544, 546], "dong": 135, "dot": [391, 466, 474, 488, 539, 546], "dotdict": [195, 245, 466], "doubl": [438, 439, 477, 545], "double_qu": 448, "double_quant_bit": [439, 477], "double_quant_dtyp": [439, 477], "double_quant_group_s": [439, 477], "double_quant_typ": 448, "double_quant_use_sym": [439, 477], "dowload_hf_model": [141, 398, 448], "download": [141, 211, 398, 448, 494, 534, 550], "download_url": 211, "downstream": 544, "dpcpp": 489, "dq": [92, 94, 116, 357, 359, 371, 471, 476, 478, 488, 552], "draw": [523, 551, 554], "drive": 497, "driven": [245, 494, 531, 538, 542], "drop": [192, 230, 418, 474, 475, 477, 488, 491, 538, 539, 544, 549, 552, 555], "drop_last": [203, 207, 387], "dry_run": 526, "dscore": 492, "dtype": [30, 31, 87, 90, 195, 212, 213, 221, 225, 387, 401, 406, 409, 413, 423, 429, 433, 439, 462, 474, 477, 478, 488, 489, 496, 497, 528, 530, 546, 549, 552, 553], "dtype_map": 30, "dtype_to_nam": 30, "duc": 555, "due": [154, 488, 497, 528, 544, 546, 552], "dummi": [44, 173, 212, 213, 234, 316, 387, 481, 537, 538], "dummy_biasadd": [61, 
333], "dummy_dataset": 215, "dummy_dataset_v2": 215, "dummy_v2": [213, 387], "dummydataset": [212, 213, 387, 481], "dummydatasetv2": 387, "dump": [2, 138, 145, 151, 154, 397, 413, 417, 448, 459, 466, 481, 488, 491, 546], "dump_class_attr": 466, "dump_data_to_loc": 466, "dump_elapsed_tim": [161, 391, 466, 495], "dump_fp32": [131, 383], "dump_model_op_stat": [413, 417, 448], "dump_numa_info": 154, "dump_op_typ": 2, "dump_stats_path": [439, 472], "dump_tabl": 466, "dump_table_to_csv": 466, "duplic": [83, 93, 266, 354, 358], "durat": 551, "dure": [1, 138, 140, 175, 179, 190, 195, 245, 396, 397, 413, 442, 448, 466, 475, 476, 478, 481, 488, 494, 496, 531, 538, 540, 542, 543, 544, 546, 552], "dyna": 195, "dynam": [140, 195, 200, 278, 387, 396, 409, 435, 437, 439, 459, 477, 478, 494, 495, 523, 528, 531, 533, 538, 545, 554, 555], "dynamic_ax": [195, 459, 528], "dynamic_length": 209, "dynamic_max_gap": [418, 439, 477], "dynamic_quant_export": 459, "dynamic_shap": 435, "dynamicquantconfig": [439, 471], "dynamo": [476, 478], "e": [140, 154, 175, 195, 396, 477, 484, 488, 489, 490, 491, 494, 527, 544, 549, 552, 554], "e16": 491, "e2m1": [473, 477, 541, 549], "e2m3": [473, 541], "e3m2": [473, 541], "e4m3": [439, 472, 473, 494, 541], "e5m2": [472, 473, 541], "e8m0": [473, 541], "each": [126, 133, 135, 140, 145, 154, 192, 195, 207, 211, 214, 221, 225, 228, 230, 231, 232, 271, 286, 387, 396, 412, 413, 417, 431, 433, 466, 470, 477, 480, 483, 488, 491, 495, 496, 497, 522, 525, 530, 538, 542, 544, 547, 549, 551, 553, 554], "eager": [188, 261, 435, 471, 476, 477, 478, 526, 533, 546, 548], "earli": [195, 488, 538, 546, 554], "eas": [478, 481, 534, 545], "easi": [209, 476, 478, 481, 528, 538, 545, 546, 549, 551], "easier": 545, "easili": [245, 482, 488, 495, 542, 552], "econom": 490, "ecosystem": [494, 545], "edg": 553, "edit": 490, "edouard": [488, 552], "edu": [3, 211], "educ": 490, "effect": [187, 476, 477, 495, 544, 545], "effici": [29, 149, 413, 476, 484, 488, 494, 521, 523, 543, 544, 545, 549, 552, 554], "efficientnet": 555, "effort": 538, "eg": [145, 211, 433], "egsdcrb1": 555, "either": [140, 153, 195, 230, 396, 476, 481, 482, 484, 488, 543, 546], "elaps": [161, 391, 466], "electra": 555, "electron": 490, "elem_format": 404, "elem_typ": 90, "element": [31, 153, 177, 178, 192, 207, 230, 387, 404, 433, 466, 473, 477, 488, 533, 537, 541, 544, 549, 552], "elementwise_over_al": 192, "elementwise_over_matmul_gemm_conv": 192, "elemformat": 404, "eleutherai": [475, 531, 536, 552], "elia": [477, 488, 549], "elimin": [51, 323], "ellipsi": [406, 443], "els": [1, 133, 195, 207, 245, 385, 433, 489, 549], "em": 555, "email": 494, "emb": 477, "embed": [145, 420, 521, 544], "embed_layernorm": 16, "embed_out": 544, "embedlayernorm": 12, "embedlayernormalizationoper": 12, "emerg": [473, 541, 552], "emit": 472, "emnlp": [494, 545], "emot": 555, "empathi": 490, "empir": [128, 150, 380], "empow": [473, 541], "empti": [141, 195, 398, 448, 472, 477, 478, 547], "emsp": 544, "emul": [478, 488, 538, 546], "en": [494, 534], "enabl": [31, 94, 135, 180, 195, 359, 409, 474, 476, 477, 481, 496, 497, 526, 539, 544, 545, 552, 554], "enable_act": 135, "enable_al": 195, "enable_auto_scal": [31, 549], "enable_bas": 195, "enable_eager_execut": 526, "enable_extend": 195, "enable_full_rang": [418, 433, 439, 462, 477, 547, 549], "enable_minmax_tun": [418, 439, 477], "enable_mse_search": [31, 549], "enable_quanted_input": [418, 439, 477], "encapsul": [95, 198, 199, 235, 262, 360, 449, 540], "encod": [140, 230, 396, 452, 553], "encodejp": 553, 
"encount": [523, 529], "end": [133, 180, 181, 182, 187, 195, 209, 224, 225, 263, 265, 385, 489, 495, 496, 497, 525, 530, 534, 538, 542, 544, 549, 553, 554], "end_epoch": 538, "end_posit": 225, "end_step": [180, 195, 538, 544], "endlessli": 554, "energi": [473, 541], "engin": 474, "english": 544, "enhanc": [484, 521, 544, 545, 548], "enough": [156, 413, 481, 488, 489, 546], "enough_memo_store_scal": 413, "ensp": 553, "ensur": [1, 225, 266, 476, 479, 526, 546, 553], "ensure_list": 1, "entir": [262, 449, 475, 477, 480, 492, 496, 497, 521, 544], "entranc": [64, 201, 336], "entri": [52, 53, 55, 135, 165, 301, 302, 305, 324, 325, 327, 437, 438, 441, 442, 466, 478, 522, 535], "entropi": [195, 521, 554], "enum": 1, "enumer": [156, 157, 449, 473, 495, 525, 526, 538, 541, 543, 544], "env": [151, 529], "env_var": 151, "environ": [30, 151, 443, 447, 474, 478, 484, 489, 490, 494, 520, 554], "eoferror": [138, 397], "ep": [3, 150, 539], "epoch": [162, 181, 182, 187, 449, 523, 525, 526, 538, 543, 544], "equal": [90, 128, 133, 150, 281, 380, 391, 466, 473, 541, 544, 549], "equal_dict": 466, "equat": [30, 232, 234, 488, 546, 552], "equival": [68, 340, 432, 473, 475, 477, 480, 488, 494, 541, 545, 549, 552], "erf": [54, 326], "error": [31, 90, 135, 140, 234, 271, 396, 413, 452, 463, 472, 477, 488, 529, 537, 549, 552, 554], "error_msg": 90, "especi": 531, "essenti": [521, 529], "estim": [133, 234, 243, 390, 554], "estimator_sess": [243, 390], "et": [135, 473, 477, 488, 521, 541, 549, 552], "etc": [151, 162, 224, 232, 235, 262, 392, 465, 494, 533, 544, 551, 554], "ethnic": 490, "euclidean": 466, "eural_compressor": 245, "eval": [492, 522, 523, 546], "eval_acc": 153, "eval_acc_fn": [474, 479, 481], "eval_arg": [153, 302, 438, 474, 478, 479, 481], "eval_dataload": [151, 195, 198, 199, 235, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 449, 520, 532, 537, 538, 546, 547, 554], "eval_fn": [153, 302, 438, 474, 478, 479, 480, 481, 482], "eval_fn_wrapp": 480, "eval_frequ": 162, "eval_func": [125, 198, 199, 235, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 281, 283, 449, 492, 523, 525, 526, 538, 546, 547, 548, 549, 554], "eval_metr": [198, 199, 235, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 449, 537, 546], "eval_perf": 153, "eval_result": 522, "eval_result_of_q_model": 153, "evalu": [151, 153, 162, 175, 198, 199, 209, 227, 230, 231, 232, 234, 235, 245, 262, 438, 449, 455, 478, 480, 481, 482, 488, 495, 496, 522, 525, 532, 533, 536, 537, 538, 539, 542, 544, 546, 551, 552, 554], "evaluate_squad": 233, "evaluation_result": 526, "evaluation_time_cost": 526, "evaluationfuncwrapp": 153, "even": [488, 547, 549, 552], "evenli": 523, "event": 490, "everi": [52, 53, 55, 175, 178, 180, 195, 324, 325, 327, 477, 495, 523, 547, 549, 554], "everyon": 490, "everyth": 192, "exact": [231, 418], "exact_match_scor": 231, "exactli": 526, "exampl": [1, 31, 135, 140, 145, 151, 152, 153, 156, 160, 173, 174, 188, 195, 209, 210, 211, 221, 225, 230, 231, 232, 234, 235, 245, 262, 280, 281, 391, 396, 408, 411, 426, 431, 433, 435, 443, 448, 449, 465, 466, 470, 475, 481, 482, 484, 490, 494, 496, 497, 501, 531, 532, 533, 548, 550, 553, 554, 556], "example_algo": [391, 448], "example_gener": 281, "example_index": 225, "example_inp": 145, "example_input": [145, 195, 408, 411, 412, 413, 417, 432, 433, 435, 438, 442, 459, 465, 471, 475, 476, 477, 478, 528], "examplealgorithm": 152, "examplealgorithmconfig": 152, "exampleclass": 281, "exce": [225, 549, 553], "exceed": [243, 390], "excel": [488, 549], "except": 
[140, 281, 396, 413, 466, 477, 522, 546, 547], "exchang": 528, "exclud": [195, 472, 495, 521, 548], "excluded_op_nam": [56, 57, 195, 328, 329, 538, 544], "excluded_precis": [195, 439, 548], "execut": [29, 30, 39, 140, 151, 157, 180, 181, 182, 195, 261, 262, 263, 265, 311, 392, 396, 412, 420, 449, 455, 465, 466, 476, 477, 488, 489, 525, 538, 539, 543, 544, 546, 549, 551, 554, 555], "executionprovid": 555, "exemplifi": 544, "exhaust": [195, 270, 413], "exhaustivetunestrategi": 268, "exist": [39, 151, 243, 311, 390, 391, 446, 466, 474, 478, 489, 528, 537, 554], "exit": [195, 263, 265, 482, 538], "exit_polici": 538, "exp": [195, 473, 538, 541, 544], "expand_and_reshap": 404, "expanddim": [45, 317], "expanddims_optim": [61, 333], "expanddimsoptim": [45, 317], "expect": [211, 477, 479, 488, 490, 491, 529, 536, 546, 549, 554], "expens": [538, 554], "experi": [481, 489, 490, 551, 552, 554], "experiment": [489, 525, 526, 528, 538, 544, 546, 550, 553], "explain": [156, 488, 497, 552], "explicit": 490, "explicitli": [156, 195, 476, 484, 489, 532, 544], "explor": [473, 495, 541], "explos": [473, 541], "expon": [473, 541], "exporsingleimagedetectionboxestococo": 230, "export": [86, 195, 230, 436, 460, 470, 471, 474, 476, 478, 489, 529], "export_compressed_model": [462, 549], "export_format": [418, 439], "export_model_for_pt2e_qu": 435, "exportconfig": 195, "exportdetectionstococo": 230, "exported_model": [471, 476], "exportgroundtruthtococo": 230, "exportsingleimagedetectionboxestococo": 230, "exportsingleimagedetectionmaskstococo": 230, "exportsingleimagedetectionstococo": 230, "exportsingleimagegroundtruthtococo": 230, "expos": 452, "express": [227, 490], "extend": [478, 489, 496, 497, 531], "extend_engin": [95, 360], "extens": [138, 140, 391, 396, 397, 466, 474, 475, 476, 478, 488, 489, 494, 495, 502, 522, 529, 530, 531, 533, 534, 536, 538, 539, 540, 544, 545, 546, 549, 552, 554], "extra": [140, 232, 396, 477, 538, 549], "extra_opset": 87, "extract": [52, 53, 55, 133, 173, 211, 280, 324, 325, 327], "extract_data_typ": 280, "extran": 522, "extrem": 521, "f": [140, 170, 396, 477, 488, 522, 526, 546, 549, 552], "f1": [231, 233, 234, 526, 537, 538, 551, 555], "f1_score": [231, 232], "face": [141, 398, 448, 489, 490, 494, 501, 545, 550, 555], "facebook": [475, 536, 552], "facil": [140, 396], "facilit": 522, "fact": [488, 546], "factor": [3, 126, 286, 413, 425, 471, 475, 477, 481, 488, 549, 552, 555], "factori": [241, 391], "fail": [140, 396, 492, 523, 528], "failur": [491, 492], "fair": 490, "faith": 490, "fake": [29, 31, 98, 142, 149, 156, 280, 413, 429, 433, 477, 488, 538, 546, 549, 552], "fake_qu": [33, 73, 92, 116, 121, 306, 345, 357, 371, 376, 439], "fake_quant": 99, "fakeaffinetensorquantfunct": [142, 429], "fakealgoconfig": 156, "fakequ": [73, 288, 345], "fakequant": 98, "fakequantizebas": 98, "falcon": [475, 494, 536, 544, 552], "fall": [140, 145, 396, 473, 541], "fallback": [145, 195, 269, 271, 272, 474, 475, 476, 528, 539, 548, 554], "fallback_list": 28, "fallback_ord": 145, "fallbacktuningsampl": 277, "fals": [1, 2, 28, 29, 30, 31, 32, 33, 34, 56, 57, 74, 77, 84, 87, 88, 90, 98, 116, 121, 128, 131, 132, 133, 139, 140, 144, 145, 151, 163, 166, 195, 200, 202, 204, 209, 211, 221, 225, 230, 234, 245, 262, 281, 289, 292, 293, 294, 297, 298, 303, 306, 328, 329, 346, 348, 371, 376, 380, 383, 384, 385, 387, 396, 403, 404, 406, 409, 413, 417, 418, 420, 425, 427, 429, 433, 439, 442, 452, 455, 462, 465, 466, 475, 477, 479, 481, 482, 483, 489, 495, 496, 523, 530, 537, 538, 546, 549, 552, 553, 554], 
"familiar": 470, "famou": [477, 488, 549], "faq": [490, 494], "far": 478, "fashionmnist": 211, "fast": [187, 195, 477, 481, 544, 545, 549], "fast_bias_correct": [148, 195, 546], "fastbiascorrect": [147, 150], "faster": [523, 524, 528, 545, 554, 555], "fatal": 463, "father": [141, 173, 398], "fault": 491, "fault_tolerant_fil": 466, "fbgemm": [474, 539, 546], "fc": [145, 195, 544], "fc1": [145, 433, 476], "fc2": [31, 145, 433], "fcn": 555, "feasibl": 195, "featur": [195, 209, 210, 221, 225, 413, 474, 479, 488, 491, 494, 520, 523, 524, 538, 539, 540, 544, 545, 550, 553], "feb": 545, "fed": [413, 554], "feed": [133, 208, 385, 544], "feed_dict": [133, 208, 385], "feedward": 184, "fefin": 199, "feng": 535, "ferplu": 555, "fetch": [46, 145, 148, 154, 203, 318, 387, 496], "fetch_modul": [145, 433, 448], "fetch_weight_from_reshap": [61, 333], "fetchweightfromreshapeoptim": [46, 318], "few": [544, 545, 552, 554], "ffffff": 554, "ffn": [143, 184, 417], "ffn2_sparsiti": [171, 544], "ffn_modul": 184, "ffn_name": 184, "field": [195, 211, 230, 473, 492, 495, 497, 526, 538, 541, 551, 554], "field_nam": [161, 466], "fig": 472, "figur": [477, 547], "file": [90, 133, 138, 140, 141, 145, 148, 160, 166, 188, 192, 195, 198, 199, 209, 210, 211, 214, 224, 225, 230, 235, 245, 249, 250, 253, 262, 385, 391, 396, 397, 398, 411, 413, 417, 465, 466, 472, 477, 483, 491, 495, 496, 497, 526, 529, 532, 535, 537, 538, 540, 542, 546, 549, 553], "file_lik": [140, 396], "file_typ": 466, "filenam": [211, 466], "filepath": [140, 396, 466, 522], "fill": [551, 553, 554], "filter": [1, 209, 210, 211, 212, 213, 214, 216, 220, 292, 298, 387, 406, 427, 466, 497, 533, 544], "filter_fn": 427, "filter_registri": 218, "filter_typ": 218, "final": [140, 173, 180, 192, 396, 476, 488, 496, 538, 542, 544, 546, 550, 554], "finalize_calibr": 442, "find": [30, 52, 53, 55, 90, 133, 194, 234, 266, 324, 325, 327, 385, 466, 482, 495, 536, 544, 549, 551, 552, 554], "find_by_nam": 30, "find_lay": [194, 420], "find_layers_nam": 420, "find_opset": 90, "fine": [186, 209, 478, 522, 544, 545, 546], "finer": [488, 552, 554], "finest": [488, 552], "finetun": [477, 488, 544, 549, 555], "finish": 484, "first": [128, 140, 145, 150, 152, 174, 195, 209, 230, 267, 278, 380, 391, 396, 413, 433, 466, 473, 474, 477, 483, 488, 489, 492, 494, 495, 496, 497, 534, 539, 541, 546, 548, 549, 551, 552, 554], "first_conv_or_matmul_quant": [195, 546], "first_n": [131, 383], "fit": [151, 175, 195, 235, 262, 266, 281, 449, 492, 520, 523, 526, 532, 537, 538, 539, 540, 544, 546, 547, 548, 549], "fit_with_raw_cmd": 151, "fix": [133, 185, 187, 209, 385, 477, 523, 544, 546, 549, 554], "fix_ref_type_of_graph_def": [133, 385], "flag": [195, 228], "flan": 544, "flatten_static_graph": 173, "flex": [494, 534], "flexibl": [474, 477, 481, 494, 544, 549], "flip": [221, 225, 553], "float": [3, 30, 31, 125, 126, 135, 152, 175, 180, 189, 192, 195, 209, 221, 228, 230, 231, 232, 277, 281, 283, 286, 303, 392, 413, 418, 433, 439, 442, 443, 452, 466, 471, 472, 473, 474, 475, 476, 477, 479, 480, 482, 488, 522, 537, 541, 546, 552, 553], "float16": [5, 8, 30, 401, 406, 462, 472, 477, 489, 549], "float16activationoper": 5, "float16binaryoper": 8, "float32": [212, 213, 225, 230, 387, 429, 462, 472, 477, 488, 495, 520, 538, 546, 549, 552, 553], "float_dict": 135, "float_model": [135, 477, 484], "float_to_bfloat16": 30, "float_to_float16": 30, "floatfunct": 492, "floor": [473, 541], "flop": [477, 488, 549], "flow": [231, 232, 488, 492, 538], "flowchart": 554, "fn": 406, "fn_arg": 406, "focu": [489, 521, 
554], "focus": [473, 477, 481, 490, 495, 531, 541], "fold": [47, 48, 145, 303, 319, 320, 413, 432, 433, 439, 459, 475, 477, 530, 549, 552], "fold_batch_norm": [61, 333], "fold_const": [61, 333], "foldbatchnormnodesoptim": [47, 319], "folder": [133, 211, 216, 235, 262, 385, 465, 477, 478, 489, 549], "follow": [73, 95, 153, 174, 211, 227, 230, 345, 360, 392, 420, 473, 474, 475, 480, 483, 484, 488, 489, 490, 491, 492, 494, 495, 496, 497, 522, 523, 525, 526, 529, 530, 534, 535, 537, 538, 539, 540, 541, 543, 544, 546, 549, 552, 554], "footprint": [195, 245, 525, 538, 542, 544, 547], "forc": [443, 548], "forg": 529, "fork": 491, "form": 232, "format": [5, 30, 39, 133, 145, 154, 195, 209, 210, 211, 230, 278, 281, 288, 289, 311, 385, 389, 390, 404, 431, 441, 445, 459, 462, 473, 474, 476, 478, 481, 483, 488, 494, 496, 497, 522, 526, 528, 537, 538, 539, 540, 541, 545, 546, 549], "format_list2str": 154, "format_vers": [138, 397], "formul": 538, "formula": [488, 544, 552], "forpytorch": 534, "fortensorflow": 534, "fortieth": 544, "forward": [1, 145, 192, 413, 433, 477, 488, 496, 538, 544, 546, 549, 552], "forward_wrapp": [145, 413, 433], "foster": 490, "found": [162, 406, 413, 474, 494, 522, 536, 539, 552, 554, 555], "foundat": 545, "four": [471, 476, 482], "fp1": [488, 552], "fp16": [31, 195, 278, 399, 401, 406, 418, 439, 488, 546], "fp2": [488, 552], "fp32": [28, 29, 31, 64, 116, 121, 128, 145, 150, 195, 198, 199, 234, 267, 271, 278, 301, 305, 336, 371, 376, 380, 391, 406, 411, 412, 415, 431, 433, 437, 455, 458, 459, 462, 465, 466, 472, 474, 475, 476, 477, 478, 479, 481, 488, 491, 495, 497, 530, 536, 537, 538, 539, 546, 548, 549, 550, 551, 552, 554, 555], "fp32_baselin": [153, 455, 495], "fp32_graph": [128, 380], "fp32_layer": [292, 293, 294, 297, 298], "fp32_model": [135, 145, 431, 441, 459, 466, 475, 476, 480, 481, 547, 549], "fp32_model_path": 547, "fp32_onnx_config": 528, "fp32_onnx_path": 459, "fp32_op": [33, 35, 92, 306, 307, 357], "fp32_tensor": 466, "fp4": [433, 473, 477, 541, 549], "fp6": [473, 541], "fp8": [437, 439, 441, 473, 478, 541, 545], "fp8_config": [439, 472, 494], "fp8_entri": 437, "fp8_white_list": [439, 472], "fp8config": [437, 439, 472, 494], "fpath": 211, "frac": [473, 488, 541, 552], "fraction": [221, 553], "fragment": 495, "framework": [1, 151, 152, 157, 163, 165, 170, 176, 180, 183, 187, 188, 196, 197, 201, 202, 205, 208, 209, 210, 211, 212, 213, 214, 215, 216, 218, 220, 222, 225, 234, 235, 236, 237, 239, 272, 278, 288, 299, 389, 455, 470, 474, 476, 481, 482, 488, 495, 497, 521, 522, 526, 527, 531, 532, 533, 537, 538, 539, 544, 545, 546, 553, 554, 555], "framework_dataset": 211, "framework_nam": [152, 522], "framework_specific_info": [32, 288, 289, 495], "frantar": [477, 488, 544, 549], "free": [29, 149, 187, 198, 199, 262, 413, 475, 480, 488, 490, 542, 551, 552], "freez": [73, 74, 75, 133, 345, 346, 385, 471, 476, 544], "freeze_fake_qu": [80, 351], "freeze_valu": [80, 351], "freeze_value_without_calib": 80, "freezefakequantopoptim": [73, 345], "freezevaluetransform": [74, 346], "freezevaluewithoutcalibtransform": 75, "frequenc": [162, 180, 195, 538, 544], "frequent": 534, "fresh": [494, 534], "friendli": [473, 488, 530, 538, 541, 545, 552], "from": [1, 30, 46, 52, 53, 55, 90, 133, 140, 141, 145, 151, 152, 153, 160, 162, 163, 169, 170, 176, 177, 178, 179, 183, 185, 186, 187, 188, 189, 190, 191, 192, 195, 198, 199, 200, 203, 206, 209, 210, 211, 212, 213, 214, 216, 221, 227, 230, 231, 232, 235, 243, 245, 262, 278, 280, 281, 288, 318, 324, 325, 327, 385, 387, 390, 392, 396, 
398, 406, 408, 411, 412, 413, 415, 417, 431, 433, 437, 441, 448, 449, 452, 455, 457, 458, 459, 462, 465, 466, 470, 471, 472, 473, 474, 475, 476, 477, 479, 480, 481, 482, 483, 484, 488, 489, 490, 491, 492, 495, 497, 520, 522, 523, 525, 526, 527, 528, 529, 531, 533, 536, 537, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554], "from_dict": 479, "from_pretrain": [462, 489, 531, 538], "frontend": 489, "frozen": [235, 243, 262, 390, 540], "frozen_func": [133, 385], "frozen_pb_sess": [243, 390], "fuel": [473, 541], "full": [30, 209, 210, 211, 462, 466, 472, 477, 494, 521, 529, 535, 544, 549, 550], "full_rang": 433, "fulli": [479, 538, 546, 552], "fulltoken": 224, "fun": 281, "func": [133, 145, 161, 385, 466, 546], "func_dict": 455, "function": [124, 125, 138, 159, 181, 182, 187, 195, 198, 199, 210, 283, 296, 379, 393, 397, 421, 460, 470, 474, 476, 477, 478, 481, 488, 489, 491, 492, 495, 496, 497, 520, 523, 525, 528, 532, 533, 536, 537, 538, 539, 544, 546, 549, 551, 552, 554], "function1": 281, "function2": 281, "function3": 281, "fundament": [479, 480, 521, 538], "funnel": 555, "funsd": 555, "further": [55, 195, 327, 474, 488, 490, 494, 539, 540, 546], "fuse": [1, 43, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 76, 77, 78, 79, 81, 116, 121, 144, 145, 149, 315, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 347, 348, 349, 350, 352, 371, 376, 417, 492, 552], "fuse_biasadd_add": [61, 333], "fuse_column_wise_mul": [61, 333], "fuse_conv_redundant_dequant": [80, 351], "fuse_conv_requant": [80, 351], "fuse_conv_with_math": [61, 333], "fuse_decomposed_bn": [61, 333], "fuse_decomposed_in": [61, 333], "fuse_gelu": [61, 333], "fuse_layer_norm": [61, 333], "fuse_matmul_redundant_dequant": [80, 351], "fuse_matmul_requant": [80, 351], "fuse_pad_with_conv": [61, 333], "fuse_pad_with_fp32_conv": [61, 333], "fuse_qdq_bn": [115, 370], "fuse_qdq_concatv2": [115, 370], "fuse_qdq_conv": [115, 370], "fuse_qdq_deconv": [115, 370], "fuse_qdq_in": [115, 370], "fuse_qdq_matmul": [115, 370], "fuse_qdq_pool": [115, 370], "fuse_reshape_transpos": [61, 333], "fusebiasaddandaddoptim": [49, 321], "fusecolumnwisemuloptim": [50, 322], "fuseconvredundantdequantizetransform": [76, 347], "fuseconvrequantizetransform": [77, 348], "fuseconvwithmathoptim": [51, 323], "fusedbatchnorm": [66, 338], "fusedbatchnormv2": [66, 338], "fusedbatchnormv3": [108, 118, 363, 373], "fusedbatcnormv3": [55, 327], "fusedecomposedbnoptim": [52, 324], "fusedecomposedinoptim": [53, 325], "fusedinstancenorm": [112, 367], "fusedmatmul": 18, "fusedmatmuloper": 18, "fusegeluoptim": [54, 326], "fuselayernormoptim": [55, 327], "fusematmulredundantdequantizetransform": [78, 349], "fusematmulrequantizedequantizenewapitransform": [79, 350], "fusematmulrequantizedequantizetransform": [79, 350], "fusematmulrequantizenewapitransform": [79, 350], "fusematmulrequantizetransform": [79, 350], "fusenodestartwithconcatv2": [109, 119, 364, 374], "fusenodestartwithconv2d": [110, 120, 365, 375], "fusenodestartwithdeconv2d": [111, 366], "fusenodestartwithfusedbatchnormv3": [108, 118, 363, 373], "fusenodestartwithfusedinstancenorm": [112, 367], "fusenodestartwithmatmul": [113, 122, 368, 377], "fusenodestartwithpool": [114, 123, 369, 378], "fusepadwithconv2doptim": [56, 328], "fusepadwithfp32conv2doptim": [57, 329], "fusetransposereshapeoptim": [58, 330], "fusion": [44, 46, 55, 63, 94, 111, 112, 113, 116, 117, 118, 122, 316, 318, 327, 335, 359, 366, 367, 368, 371, 372, 373, 377, 480, 492, 495, 530], "futur": [149, 153, 198, 199, 470, 478, 533, 536, 
538], "fw": 496, "fwk": 239, "fwk_name": [152, 522], "fx": [145, 272, 406, 435, 441, 471, 474, 476, 533, 539, 545, 546, 548], "fx_model": 145, "fx_white_list": 145, "g": [90, 140, 154, 175, 195, 396, 477, 484, 488, 491, 527, 544, 549, 550, 552, 554], "g_idx": 429, "gain": [543, 545], "gan": 545, "gap": [477, 540], "gather": [1, 16, 195], "gatheroper": 13, "gaudi": [478, 494, 534, 545], "gaudi2": [472, 494, 534], "gaussian": [266, 554], "gavgpool": 16, "gb": 484, "gcc": 555, "gcp": 545, "gelu": [54, 326], "gemm": [16, 195], "gemm_to_matmul": [195, 546], "gemmoper": 15, "gen": [474, 488, 536, 539, 545, 546, 548], "gen_bar_updat": 211, "gen_id": 489, "gen_text": 489, "gender": 490, "gener": [1, 33, 34, 72, 107, 133, 145, 151, 153, 154, 173, 198, 199, 200, 208, 211, 212, 213, 214, 225, 227, 235, 258, 262, 266, 281, 299, 303, 306, 344, 385, 387, 413, 417, 418, 420, 438, 439, 442, 449, 472, 473, 474, 475, 477, 478, 479, 484, 488, 489, 491, 492, 495, 497, 523, 537, 538, 539, 541, 542, 544, 545, 546, 548, 549, 552, 553, 554], "generaltopk": 234, "generate_activation_observ": [145, 417], "generate_feed_dict": [133, 385], "generate_ffn2_pruning_config": 171, "generate_kwarg": 489, "generate_mha_pruning_config": 171, "generate_prefix": [151, 154], "generate_xpu_qconfig": 417, "generategraphwithqdqpattern": [92, 357], "generator1": 281, "geomean": 533, "gestalt": 545, "get": [1, 29, 30, 31, 52, 53, 55, 89, 90, 133, 141, 145, 151, 154, 161, 165, 166, 169, 170, 171, 173, 176, 183, 189, 190, 191, 192, 195, 203, 225, 234, 243, 262, 271, 278, 280, 299, 302, 324, 325, 327, 385, 387, 390, 391, 398, 413, 417, 420, 433, 439, 448, 455, 459, 462, 466, 470, 475, 478, 483, 488, 496, 521, 529, 533, 534, 536, 538, 545, 549, 552, 554, 556], "get_absorb_lay": [145, 433], "get_acceler": 446, "get_activ": 166, "get_adaptor_nam": 280, "get_algorithm": 455, "get_all_config": 160, "get_all_config_set": [302, 438], "get_all_config_set_from_config_registri": [152, 522], "get_all_fp32_data": [391, 466], "get_all_registered_config": [299, 439], "get_architectur": 151, "get_attribut": 173, "get_blob_s": 31, "get_block_prefix": [145, 433], "get_bounded_thread": 151, "get_children": [141, 398], "get_common_modul": 173, "get_const_dim_count": [52, 53, 324, 325], "get_core_id": 151, "get_criterion": 169, "get_dataload": 418, "get_default_autoround_config": 439, "get_default_awq_config": 439, "get_default_double_quant_config": 439, "get_default_dynamic_config": 439, "get_default_fp8_config": 439, "get_default_fp8_config_set": 439, "get_default_gptq_config": 439, "get_default_hqq_config": 439, "get_default_mixed_precision_config": 439, "get_default_mixed_precision_config_set": 439, "get_default_mx_config": 439, "get_default_rtn_config": [439, 484], "get_default_sq_config": [303, 439], "get_default_static_config": 439, "get_default_static_quant_config": [299, 303], "get_default_teq_config": 439, "get_depth": [145, 417], "get_dict_at_depth": [145, 417], "get_double_quant_config_dict": 448, "get_element_under_depth": [145, 417], "get_embedding_contigu": 145, "get_estimator_graph": 133, "get_example_input": 145, "get_fallback_ord": 145, "get_filter_fn": 406, "get_final_text": 225, "get_framework_nam": 1, "get_func_from_config": 455, "get_graph_def": [133, 385], "get_half_precision_node_set": 406, "get_hidden_st": 145, "get_index_from_strided_slice_of_shap": 90, "get_input_output_node_nam": [133, 385], "get_ipex_vers": 446, "get_lay": 192, "get_linux_numa_info": 154, "get_max_supported_opset_vers": 89, "get_metr": 455, "get_model_devic": 
448, "get_model_fwk_nam": 239, "get_model_info": 448, "get_model_input_shap": [133, 385], "get_model_typ": [243, 390], "get_modul": [141, 398, 413, 433], "get_module_input_output": [145, 433], "get_mse_order_per_fp32": 145, "get_mse_order_per_int8": 145, "get_named_children": [141, 398], "get_node_map": 459, "get_node_original_nam": 30, "get_numa_nod": 154, "get_number_of_socket": 466, "get_op_list": 466, "get_op_type_by_nam": 145, "get_par": [413, 433], "get_pattern": 176, "get_physical_id": 151, "get_postprocess": 455, "get_preprocess": 455, "get_processor_type_from_user_config": 448, "get_prun": 183, "get_quant": 448, "get_quant_dequant_output": 29, "get_quantizable_onnx_op": 459, "get_quantizable_ops_from_cfg": [145, 417], "get_quantizable_ops_recurs": [413, 417], "get_reg": 189, "get_reg_typ": 189, "get_reversed_numa_info": 154, "get_rtn_double_quant_config_set": 438, "get_schedul": 190, "get_schema": 89, "get_siz": 466, "get_sparsity_ratio": 192, "get_sparsity_ratio_tf": 192, "get_subgraphs_from_onnx": 90, "get_super_module_by_nam": [141, 398], "get_tensor_by_nam": [133, 385], "get_tensor_histogram": [391, 466], "get_tensor_val_from_graph_nod": 133, "get_tensorflow_node_attr": 90, "get_tensorflow_node_shape_attr": 90, "get_tensorflow_tensor_data": 90, "get_tensorflow_tensor_shap": 90, "get_tensors_info": 466, "get_tf_criterion": 191, "get_tf_model_typ": 390, "get_thread": 151, "get_threads_per_cor": 151, "get_torch_vers": [145, 446], "get_torchvision_map": 225, "get_tuning_histori": 466, "get_unquantized_node_set": 406, "get_weight_from_input_tensor": 133, "get_weight_scal": 31, "get_weights_detail": 466, "get_windows_numa_info": 154, "get_woq_tuning_config": [439, 482], "get_workspac": 161, "getdefaultencod": 452, "getenv": 522, "gholami": 544, "gigant": [475, 480, 552], "girl": 489, "git": [491, 529, 534], "github": [3, 135, 177, 178, 188, 209, 227, 228, 231, 232, 234, 262, 439, 477, 491, 494, 527, 534, 535, 538, 550], "give": [156, 496, 554], "given": [1, 3, 30, 52, 53, 55, 89, 90, 101, 133, 141, 145, 152, 194, 225, 230, 262, 324, 325, 327, 392, 398, 406, 409, 413, 427, 433, 442, 448, 452, 478, 482, 497, 544, 552, 553], "global": [100, 175, 195, 280, 389, 466, 477, 478, 479, 492, 538, 544, 554], "global_config": 192, "global_st": 466, "global_step": 180, "globalaveragepool": 14, "globalaveragepooloper": 14, "glorot_uniform": [292, 293, 294, 298], "glue": [227, 234, 537, 538], "gluon": [1, 235, 262, 540], "glx": 529, "gm": 406, "go": [530, 549, 553], "goal": [151, 235, 262, 482, 488, 496, 523, 543, 546, 554], "goe": 472, "good": [479, 490, 546, 554], "googl": [474, 494, 522, 539, 545], "googlenet": 555, "got": [133, 385, 496, 529], "gp": 266, "gpt": [475, 494, 531, 536, 544, 552], "gpt2": 555, "gptq": [31, 392, 428, 431, 437, 439, 441, 478, 484, 488, 489, 494, 522, 536, 547, 549], "gptq_arg": [477, 549], "gptq_config": 549, "gptq_config_path": 549, "gptq_entri": 437, "gptq_g128asym": 549, "gptq_g32asym": 549, "gptq_g32asym_disable_last_matmul": 549, "gptq_quantiz": 31, "gptq_related_block": 420, "gptqconfig": [437, 439, 452, 477, 482, 489], "gptquantiz": 420, "gpu": [74, 75, 82, 140, 195, 346, 353, 396, 413, 443, 477, 481, 520, 533, 538, 539, 546, 547], "gracefulli": 490, "grad": [182, 187], "gradient": [169, 439, 477, 488, 494, 533, 538, 544, 545, 555], "gradient_accumulate_step": [418, 439, 477], "gradient_accumulation_step": 538, "gradientcriterion": 169, "gradual": [190, 544], "grain": [186, 478, 544, 545, 554], "gram": 228, "granular": [292, 293, 294, 297, 298, 409, 473, 488, 
495, 496, 497, 530, 533, 541, 546, 552], "graph": [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 90, 91, 92, 93, 94, 95, 97, 98, 99, 102, 115, 117, 121, 124, 127, 128, 130, 131, 132, 133, 173, 208, 236, 243, 261, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 362, 370, 372, 376, 379, 380, 382, 383, 384, 385, 390, 406, 441, 471, 476, 488, 492, 495, 526, 532, 540, 546, 547, 548], "graph_bas": [72, 344], "graph_convert": [96, 361], "graph_converter_without_calib": 96, "graph_cse_optim": [61, 333], "graph_def": [39, 125, 126, 133, 235, 243, 261, 262, 283, 286, 311, 385, 390, 458], "graph_def_sess": [243, 390], "graph_modul": 435, "graph_nam": 87, "graph_node_name_map": 133, "graph_optimization_level": [195, 546], "graph_output": 90, "graph_rewrit": [96, 361], "graph_sess": [243, 390], "graph_transform_bas": [130, 382], "graph_util": [96, 361], "graphanalyz": [95, 360], "graphconvert": [33, 306], "graphconverterwithoutcalib": 34, "graphcseoptim": [59, 331], "graphdef": [59, 133, 243, 261, 331, 385, 390, 540], "graphfoldconstantoptim": [48, 320], "graphmodel": 406, "graphmodul": [145, 406, 435, 492, 548], "graphrewriterbas": [71, 343], "graphrewriterhelp": [95, 360], "graphtrac": [413, 433], "graphtransform": [129, 381], "graphtransformbas": [129, 381], "grappler": [60, 332, 530], "grappler_optim": 530, "grappler_pass": [61, 333], "grappleroptim": [60, 332], "greater": [133, 391, 466, 481, 544, 554], "greatest": 554, "greatli": [477, 547, 549], "grei": [477, 547], "grid": [195, 544], "ground": [231, 232], "ground_truth": [231, 232], "groundtruth": [195, 230], "groundtruth_box": 230, "groundtruth_boxes_list": 230, "groundtruth_class": 230, "groundtruth_classes_list": 230, "groundtruth_dict": 230, "groundtruth_is_crowd": 230, "groundtruth_mask": 230, "group": [31, 189, 292, 426, 433, 477, 488, 494, 533, 544, 549, 553, 554, 555], "group_dim": [439, 477, 549], "group_norm": 528, "group_siz": [31, 142, 145, 280, 426, 429, 433, 439, 452, 477, 482, 549], "grouplasso": 189, "groupnorm": 552, "grow": [477, 488, 549], "grown": 544, "growth": [473, 474, 538, 539, 541, 544], "gt": [475, 477, 552, 553], "guangxuan": [477, 488, 549, 552], "guarante": [195, 538], "guess_output_rank": 30, "gui": [533, 544], "guid": [209, 474, 481, 492, 494, 522, 534], "guidelin": [493, 494], "gz": 211, "h": [31, 195, 225, 526, 553], "h384": 555, "h5": 540, "ha": [52, 53, 55, 59, 140, 179, 180, 227, 281, 324, 325, 327, 331, 391, 396, 404, 420, 443, 474, 479, 481, 488, 491, 495, 496, 497, 523, 528, 533, 538, 539, 544, 546, 548, 551, 552, 554], "habana": [472, 494, 534], "habana_visible_devic": 494, "habanalab": 494, "hack": 227, "haihao": [535, 544], "half": [399, 401, 406, 439, 474, 477, 539], "half_away_from_zero": [292, 293, 294, 297, 298], "half_precision_convert": 400, "half_precision_rewrit": 407, "halfprecisionconvert": 399, "halfprecisionmodulewrapp": 401, "hand": [478, 481], "handl": [46, 133, 149, 159, 318, 385, 393, 413, 423, 463, 492, 493, 522, 523, 552], "handler": [398, 452, 466, 554], "hanj": 3, "hanwen": 535, "harass": 490, "hard": [195, 523], "hardswish": 528, "hardtanh": 552, "hardwar": [161, 448, 473, 476, 484, 494, 495, 
533, 538, 541, 544, 545], "harm": 490, "harmon": [232, 234], "has_zp": 31, "hasattr": 489, "hassoun": 544, "have": [3, 59, 68, 126, 133, 140, 170, 174, 176, 183, 207, 227, 230, 234, 262, 280, 281, 286, 331, 340, 385, 396, 412, 413, 417, 473, 474, 475, 477, 482, 488, 489, 490, 491, 496, 497, 522, 523, 526, 530, 535, 537, 538, 539, 541, 542, 544, 546, 547, 549, 552, 554], "haven": 544, "hawq": [135, 269, 554], "hawq_metr": 136, "hawq_top": 135, "hawq_v2": [195, 270], "hawq_v2_loss": 554, "hawq_v2tunestrategi": 269, "hbm": 534, "he": 495, "head": [171, 173, 177, 184, 192, 477, 544, 555], "head_mask": 184, "header": [161, 466, 529], "heavi": [477, 525], "height": [179, 221, 225, 526, 553], "helloworld": [539, 553], "help": [145, 166, 433, 470, 482, 488, 522, 536, 540, 549, 554], "helper": [30, 95, 101, 133, 145, 209, 210, 211, 224, 243, 360, 385, 390, 417, 457, 458, 459, 464], "here": [230, 281, 472, 475, 476, 477, 480, 481, 484, 488, 489, 496, 526, 527, 528, 530, 536, 537, 546, 550, 551, 552, 555], "herebi": 552, "herlper": [124, 379], "hesit": 544, "hessian": [31, 135, 269, 477, 549, 554], "hessian_trac": 135, "hessiantrac": 135, "heterogen": 545, "hf": [141, 398, 431, 441, 448, 484, 489, 536, 552], "hicham": 477, "hidden": [174, 179, 495], "high": [30, 212, 213, 387, 472, 481, 534, 545, 554], "higher": [152, 195, 198, 199, 235, 262, 443, 449, 472, 477, 484, 488, 492, 528, 537, 544, 546, 549, 551], "higher_is_bett": [195, 234, 262, 554], "highest": 554, "highli": [477, 545, 549], "highlight": 492, "hint": [466, 529], "histogram": [3, 391, 453, 466], "histogramcollector": 3, "histori": [195, 465, 466, 554], "history_cfg": 465, "hoc": 227, "hold": [230, 266], "holder": 216, "hook": [135, 162, 166, 184, 398, 455, 525, 538, 544, 552], "hope": 528, "horizont": [225, 553], "horovod": 526, "host": [494, 526], "hostconst": [82, 353], "hour": 554, "how": [31, 140, 162, 169, 175, 181, 182, 187, 195, 225, 396, 433, 448, 470, 474, 476, 477, 478, 479, 480, 481, 488, 489, 491, 492, 493, 495, 523, 525, 526, 532, 537, 538, 544, 545, 546, 552, 553, 554], "howev": [140, 396, 477, 484, 488, 544, 549, 552], "howpublish": 535, "hp_dtype": [439, 472], "hpex": 446, "hpo": [198, 544], "hpoconfig": 195, "hpu": [429, 441, 443, 472, 478], "hpu_acceler": 443, "hpuweightonlylinear": 429, "hqq": [428, 439, 478, 494], "hqq_arg": 477, "hqq_blog": [439, 477], "hqq_entri": 437, "hqqconfig": [437, 439, 477], "hqqlinear": [423, 427], "hqqmodul": 422, "hqqmoduleconfig": 422, "hqqtensorhandl": 423, "hqquantiz": 427, "hqt": 472, "hqt_output": [439, 472], "hroughput": 483, "ht": 555, "html": [492, 494, 496, 521, 526, 533, 534, 546], "htmllabel": 554, "http": [3, 135, 169, 177, 178, 187, 188, 209, 211, 227, 228, 230, 231, 232, 234, 262, 420, 439, 477, 492, 494, 534, 535, 538, 544, 550], "hub": [141, 398, 431, 441, 448, 494, 527], "hue": 553, "hug": [141, 398, 448, 489, 494, 545], "huge": [525, 544, 547], "hugginfac": [431, 441], "huggingfac": [173, 184, 209, 431, 441, 462, 473, 494, 527, 541, 552, 555], "huggingface_model": 188, "human": [154, 554], "hvd": [234, 526], "hw": 472, "hw_aligned_single_scal": 472, "hybirdblock": [235, 262], "hybrid": [484, 489], "hybridblock": 540, "hyper": [167, 477], "hyperparamet": [195, 488, 551, 552, 554], "i": [1, 30, 31, 33, 34, 36, 39, 40, 45, 49, 55, 67, 70, 90, 117, 128, 133, 135, 138, 140, 144, 145, 150, 151, 153, 156, 161, 162, 165, 166, 169, 173, 174, 180, 185, 186, 187, 188, 189, 191, 192, 195, 198, 199, 200, 207, 208, 209, 210, 211, 212, 213, 216, 218, 221, 225, 227, 230, 232, 234, 235, 
245, 249, 250, 253, 262, 267, 272, 280, 281, 306, 308, 311, 312, 317, 321, 327, 339, 342, 372, 380, 385, 387, 391, 396, 397, 406, 408, 409, 413, 417, 420, 426, 427, 431, 433, 435, 439, 441, 443, 446, 448, 449, 452, 462, 465, 466, 471, 472, 473, 474, 475, 476, 477, 478, 480, 481, 482, 483, 484, 488, 489, 490, 491, 492, 494, 495, 496, 497, 498, 500, 506, 509, 512, 520, 521, 522, 523, 525, 526, 528, 529, 530, 533, 534, 535, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555], "ic": [534, 545], "id": [151, 224, 227, 229, 230, 231, 232, 537, 551, 554], "idea": [488, 494, 544, 552, 554], "ideal": 474, "ident": [59, 65, 174, 331, 337, 490], "identifi": [52, 53, 55, 140, 230, 243, 324, 325, 327, 390, 396, 482, 544, 549], "idx1": 211, "idx3": 211, "ieee": [135, 474, 488, 539, 552], "ignor": [466, 477, 488, 521, 549, 552, 554], "ignore_attr": 466, "ignore_kei": 466, "ii": 554, "illinoi": 3, "illustr": [477, 496, 497, 525, 554], "imag": [210, 211, 214, 216, 221, 225, 230, 474, 488, 526, 528, 544, 552, 553, 555], "image_format": 216, "image_height": 230, "image_id": [230, 537], "image_list": 214, "image_tensor": 538, "image_width": 230, "imageclassifi": 211, "imagefold": [211, 526, 538], "imagenet": [211, 214, 221, 479, 538, 544, 553, 555], "imagenet_dataset": 215, "imagenet_transform": 222, "imagenetraw": 214, "imagerecord": 526, "imageri": 490, "img": 494, "img1": 214, "img2": 214, "img_dir": 210, "imgx": 214, "iml": 545, "immedi": 544, "impact": [269, 544, 550, 554], "imper": 492, "implement": [95, 128, 132, 140, 198, 199, 204, 207, 208, 209, 211, 227, 235, 245, 262, 269, 360, 380, 384, 392, 396, 449, 477, 489, 497, 523, 537, 538, 544, 546, 549, 550, 554], "implicitli": [140, 156, 396], "import": [133, 151, 153, 161, 195, 235, 245, 262, 281, 385, 431, 441, 446, 449, 466, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 488, 489, 492, 494, 497, 520, 523, 525, 526, 528, 531, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 550, 551, 552, 553, 554], "importerror": [138, 397, 529], "impract": 477, "improv": [186, 474, 476, 477, 488, 489, 491, 494, 521, 522, 538, 544, 545, 546, 548, 549, 554, 555], "in_featur": [403, 423, 429], "in_graph": [133, 385], "in_graph_is_binari": [133, 385], "in_mp": 30, "inappropri": 490, "inc": [225, 301, 305, 429, 431, 441, 481, 482, 490, 496, 536, 545, 550, 554], "inc_model": [528, 540], "inc_target_devic": [443, 478, 489], "incbench": 483, "incept": 555, "incid": 490, "incit": [475, 552], "includ": [138, 163, 169, 173, 175, 189, 191, 195, 209, 211, 218, 225, 281, 392, 397, 418, 431, 437, 441, 448, 466, 472, 477, 478, 481, 482, 484, 489, 490, 495, 496, 497, 523, 534, 535, 536, 537, 538, 544, 546, 548, 549, 554], "include_lay": 453, "include_nod": 1, "include_tensors_kl": 1, "include_tensors_minmax": 1, "inclus": 490, "incompat": 529, "incorpor": [477, 496, 497, 525, 544, 554], "incorrect": [140, 396], "incquantizationconfigmixin": 452, "increas": [195, 473, 477, 524, 533, 541, 549, 554], "increasingli": 544, "increment": 554, "incub": 3, "incur": [473, 541], "incweightonlylinear": 429, "independ": [184, 257, 261, 497], "index": [52, 53, 55, 90, 151, 192, 195, 203, 207, 211, 324, 325, 327, 387, 466, 488, 494, 534, 537, 549, 552], "indexdataset": 207, "indexerror": [138, 397], "indexfetch": [203, 387], "indic": [140, 152, 162, 195, 203, 207, 209, 230, 387, 396, 409, 426, 443, 481, 492, 523, 529], "individu": [126, 175, 257, 261, 286, 488, 490, 544, 552], "industri": [537, 545], "infer": [1, 30, 90, 125, 
145, 173, 243, 245, 262, 283, 301, 305, 390, 417, 433, 449, 474, 475, 476, 477, 480, 481, 488, 489, 494, 495, 496, 521, 525, 531, 538, 539, 542, 544, 545, 546, 549, 552, 554], "infer_onnx_shape_dtyp": 90, "infer_shap": 30, "inferenc": [473, 541], "influenc": [145, 544], "info": [30, 145, 154, 161, 177, 178, 192, 223, 239, 391, 412, 413, 417, 442, 448, 462, 463, 466, 492, 522, 540], "inform": [1, 135, 154, 169, 170, 173, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 195, 198, 199, 234, 389, 426, 466, 472, 473, 475, 477, 481, 483, 484, 490, 491, 493, 494, 495, 496, 497, 498, 500, 506, 509, 512, 521, 522, 530, 538, 540, 541, 544, 548, 549, 550, 552, 555, 556], "infrastructur": 524, "ingest": 230, "inherit": [162, 163, 185, 186, 190, 206, 225, 392, 495, 496], "init": [0, 4, 29, 148, 164, 170, 172, 193, 537, 554], "init_alpha": [413, 439], "init_quantize_config": 101, "init_tun": 153, "initi": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 21, 22, 23, 25, 26, 29, 30, 31, 90, 101, 133, 140, 153, 163, 180, 198, 199, 206, 209, 262, 278, 282, 285, 287, 291, 292, 293, 294, 295, 296, 297, 298, 300, 385, 396, 400, 409, 448, 449, 478, 481, 488, 497, 537, 544, 546, 552, 554], "initial_op_tuning_cfg": 277, "initial_tuning_cfg_with_quant_mod": 278, "initialize_int8_avgpool": 297, "initialize_int8_conv2d": 292, "initialize_int8_dens": 293, "initialize_int8_depthwise_conv2d": 294, "initialize_int8_maxpool": 297, "initialize_int8_separable_conv2d": 298, "initialize_name_count": 90, "inject": [44, 316, 538], "injectdummybiasaddoptim": [44, 316], "inlin": [133, 385], "innov": 545, "inplac": [29, 412, 413, 442, 478, 489], "input": [1, 2, 29, 30, 31, 39, 45, 49, 52, 53, 55, 56, 57, 59, 67, 68, 70, 71, 90, 94, 95, 101, 107, 126, 133, 134, 142, 144, 145, 149, 154, 173, 174, 192, 195, 198, 199, 209, 210, 211, 213, 221, 224, 225, 234, 235, 239, 243, 262, 286, 311, 317, 321, 324, 325, 327, 328, 329, 331, 339, 340, 342, 343, 359, 360, 385, 387, 390, 391, 408, 411, 413, 417, 425, 429, 433, 435, 442, 448, 449, 457, 458, 459, 462, 465, 466, 471, 476, 477, 478, 488, 495, 496, 520, 521, 523, 525, 526, 528, 530, 532, 537, 538, 539, 540, 544, 546, 549, 552, 553], "input2tupl": 145, "input_data": [29, 523], "input_desc": 1, "input_dtyp": 90, "input_fil": 225, "input_fn": [133, 243, 390], "input_func": [145, 433], "input_graph": [116, 121, 128, 258, 261, 371, 376, 380, 495], "input_graph_def": [52, 53, 55, 324, 325, 327], "input_id": [209, 225, 489, 538], "input_mask": [225, 538], "input_max": 413, "input_max_ab": 413, "input_min": 413, "input_minmax": 413, "input_model": 540, "input_model_tensor": 466, "input_nam": [52, 53, 87, 127, 195, 243, 324, 325, 390, 458, 459, 528], "input_name_to_nod": 457, "input_node_map": [52, 53, 324, 325], "input_node_nam": [69, 116, 121, 133, 341, 371, 376, 385], "input_output_nam": [60, 332], "input_pb": [129, 131, 132, 381, 383, 384], "input_scal": [142, 398, 413, 429], "input_shap": [90, 213, 387], "input_tensor": [133, 243, 385, 390], "input_tensor_data": 467, "input_tensor_ids_op_nam": [145, 417], "input_tensor_nam": [133, 243, 385, 390], "input_valu": [145, 433], "inputbatch": 225, "inputcapturemodul": 413, "inputfeatur": [209, 225], "inputs_as_nchw": [127, 458], "insecur": [140, 396], "insensit": 443, "insert": [62, 90, 92, 94, 98, 131, 149, 288, 334, 357, 359, 383, 413, 433, 442, 476, 477, 478, 480, 488, 492, 496, 523, 533, 538, 544, 546, 548, 549, 552], "insert_log": [130, 382], "insert_newlin": 466, "insert_print_nod": [61, 333], 
"insert_qdq_pattern": [91, 356], "insertlog": [131, 383], "insertprintminmaxnod": [62, 334], "inset": 538, "insid": [230, 477, 525, 529, 544, 547, 554], "insight": [545, 550, 554], "inspect": 550, "inspect_tensor": 495, "inspect_typ": 495, "inspir": [477, 549], "instal": [391, 481, 489, 526, 529, 531, 556], "instanc": [100, 101, 151, 154, 195, 198, 199, 231, 232, 234, 235, 262, 266, 280, 409, 449, 466, 477, 483, 490, 491, 492, 495, 520, 531, 538, 549, 554, 555], "instance_index": 154, "instance_norm": 528, "instancenorm": [53, 325, 552], "instances_val2017": 210, "instanti": 543, "instead": [195, 208, 466, 476, 544, 549], "institut": 211, "instruct": [474, 475, 488, 489, 496, 497, 534, 539, 544, 545, 546, 552], "insuffici": 483, "insult": 490, "int": [1, 3, 29, 30, 31, 90, 125, 133, 143, 145, 152, 156, 161, 171, 195, 208, 209, 210, 221, 225, 228, 230, 234, 251, 257, 261, 280, 281, 283, 284, 288, 301, 302, 305, 385, 404, 413, 417, 418, 423, 425, 426, 429, 433, 439, 444, 452, 458, 459, 462, 466, 477, 481, 522, 523, 537, 538, 549, 553], "int32": [429, 462, 477, 549], "int4": [488, 494, 536, 546], "int8": [5, 6, 30, 31, 72, 108, 109, 110, 116, 118, 119, 120, 121, 128, 133, 150, 151, 195, 221, 278, 280, 292, 293, 294, 297, 298, 299, 303, 344, 363, 364, 365, 371, 373, 374, 375, 376, 380, 409, 433, 439, 441, 457, 458, 459, 465, 466, 472, 473, 475, 477, 478, 479, 481, 488, 491, 492, 495, 496, 497, 520, 525, 530, 536, 538, 539, 541, 545, 546, 548, 549, 550, 551, 552, 553], "int8_conv_config": 496, "int8_model": [458, 459], "int8_model_path": 547, "int8_node_name_revers": 133, "int8_onnx_config": [195, 528], "int8_sequ": [33, 306], "int_label": 537, "int_max": 30, "integ": [162, 179, 180, 195, 230, 234, 281, 475, 477, 480, 488, 495, 497, 521, 546, 549, 552, 554], "integerop": 555, "integr": [133, 163, 385, 474, 478, 481, 488, 495, 538, 549], "intel": [154, 165, 177, 178, 188, 226, 233, 234, 246, 262, 270, 276, 290, 302, 303, 304, 305, 391, 392, 394, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 450, 452, 456, 468, 470, 472, 474, 475, 476, 477, 478, 479, 481, 482, 483, 485, 488, 490, 491, 493, 495, 496, 497, 521, 524, 525, 527, 528, 529, 530, 531, 535, 536, 539, 540, 542, 543, 544, 545, 546, 548, 549, 550, 552, 554, 555], "intel_extension_for_pytorch": [446, 476, 489], "intelcaff": 545, "intellig": 545, "intelon": 545, "inteltensorflow": [214, 216], "intend": 491, "inter": 195, "inter_area": 221, "inter_num_of_thread": [195, 257, 261, 538], "inter_pol": 221, "interact": [494, 551], "interest": [490, 494], "interfac": [71, 98, 154, 301, 343, 390, 392, 455, 474, 476, 478, 481, 488, 533, 538, 546, 550], "interleav": 210, "intermedi": [30, 135, 163, 195, 488, 492, 533, 546], "intermediatelayersknowledgedistillationloss": 163, "intermediatelayersknowledgedistillationlossconfig": 195, "intern": [135, 207, 278, 387, 472, 523, 537, 544], "internal_pattern": 278, "internet": 211, "interpol": [186, 225, 537, 544, 553], "intersect": [234, 406, 495, 537], "interv": [186, 544], "intra": 195, "intra_num_of_thread": [195, 257, 261, 538], "introduc": [59, 331, 470, 477, 488, 495, 496, 497, 530, 538, 546, 549, 550, 552], "introduct": 470, "intuit": [477, 488, 549, 552], "inturn": 537, "inuput": 30, "invalid": 413, "invalid_lay": 175, "invent": [488, 546], "invers": [477, 549], "investig": [490, 522], "invok": [230, 496], "involv": 476, "io": [1, 140, 396, 439, 477, 494, 534], "iou": 234, "iou_thr": [230, 234, 537], "iou_typ": 230, "ipc": 494, "ipex": [145, 195, 411, 412, 413, 417, 437, 441, 446, 465, 474, 
475, 489, 533, 536, 538, 539, 552], "ipex_config": [145, 413], "ipex_config_path": [145, 413, 417], "ipexmodel": 244, "ir_vers": 29, "is_asymmetr": 496, "is_b_transpos": 30, "is_ckpt_format": [133, 385], "is_dynam": 409, "is_fused_modul": 145, "is_glob": 175, "is_hpex_avail": 446, "is_imposs": 225, "is_int8_model": 465, "is_ipex_avail": 446, "is_ipex_import": 446, "is_large_model": 30, "is_leaf": 420, "is_list_or_tupl": 90, "is_measur": 245, "is_model_quant": 1, "is_onnx_domain": 90, "is_package_avail": 446, "is_perchannel": 496, "is_qat": 144, "is_saved_model_format": [133, 385], "is_subgraph": 87, "is_transformers_import": 446, "isa": 474, "isiter": 1, "isn": [52, 53, 55, 324, 325, 327], "issu": [413, 488, 490, 491, 493, 494, 534, 546], "item": [30, 192, 195, 224, 278, 391, 466, 488, 521, 526, 544, 552, 554], "item_list": 30, "item_typ": 278, "itemstyl": 554, "iter": [1, 2, 90, 125, 133, 145, 151, 174, 180, 187, 190, 195, 198, 199, 203, 207, 211, 212, 235, 262, 283, 301, 305, 385, 387, 413, 417, 418, 425, 433, 439, 449, 452, 455, 477, 481, 482, 492, 495, 520, 523, 538, 544, 546, 552, 554], "iter_bar": 538, "iter_op": [133, 385], "iterabledataset": [207, 211], "iterablefetch": [203, 387], "iterablesampl": [207, 387], "iteration_list": 495, "iterativeschedul": 190, "iterator_sess_run": [133, 385], "itex": [32, 94, 195, 289, 359, 458, 481, 539, 546, 552], "itex_instal": 391, "itex_mod": [32, 33, 74, 92, 116, 121, 289, 306, 346, 357, 371, 376], "itex_qdq_mod": [56, 57, 328, 329], "itrex": [195, 418, 439, 475, 552], "its": [30, 45, 81, 133, 135, 173, 182, 185, 187, 192, 195, 205, 211, 225, 228, 229, 266, 317, 352, 406, 408, 457, 474, 477, 482, 488, 490, 535, 537, 539, 544, 549, 551, 553, 554], "itself": 544, "j": [475, 488, 494, 536, 544, 552], "jan": 545, "jason": 552, "jbla": 31, "jeffrei": 521, "ji": [477, 488, 549], "jit": [173, 476, 552], "jitbasicsearch": 173, "jitter": 553, "john": [391, 466], "join": 494, "joint": 545, "jonathanhuang": 230, "journei": 545, "jpeg": 553, "jpg": [210, 214, 216, 494], "json": [145, 160, 210, 225, 411, 413, 417, 465, 477, 540, 549, 553], "json_file_path": [411, 465], "judg": 420, "juli": 545, "jun": 545, "june": [494, 545], "just": [82, 200, 223, 225, 245, 353, 387, 413, 476, 481, 488, 489, 529, 538, 543, 546, 552, 553, 554], "k": [234, 262, 537, 544], "k_block": 31, "kappa": 425, "keep": [140, 161, 184, 192, 267, 396, 492, 495, 522, 550], "keep_mask_lay": 175, "keepdim": [488, 552], "kei": [133, 135, 140, 141, 173, 184, 192, 195, 243, 281, 390, 391, 396, 398, 413, 433, 453, 455, 466, 477, 488, 494, 496, 545, 549, 554], "kept": 179, "kera": [55, 101, 165, 173, 192, 195, 211, 218, 238, 243, 287, 290, 302, 305, 327, 390, 391, 479, 481, 496, 523, 526, 540], "keras_model": 237, "keras_sess": [243, 390], "kerasadaptor": 288, "kerasbasepattern": 175, "kerasbaseprun": 180, "kerasbasicprun": 181, "kerasconfigconvert": 288, "kerasmodel": [238, 390], "keraspatternnxm": 179, "kerasqueri": 288, "kerassurgeri": 288, "kernel": [31, 149, 195, 477, 495, 496, 554], "kernel_constraint": [292, 293], "kernel_initi": [292, 293], "kernel_regular": [292, 293], "kernel_s": [292, 294, 298], "keutzer": 544, "key_layer_nam": 184, "keynot": 545, "keyword": [140, 195, 396, 413, 431, 441], "kim": 544, "kind": [145, 538], "kit\u4e3aai\u5e94\u7528\u5e26\u6765\u9ad8\u6548\u5f02\u6784\u52a0\u901f\u670d\u52a1": 545, "kl": [1, 3, 195, 409, 413, 439, 453, 461, 496, 497, 521, 530, 538, 554], "kl_diverg": 460, "klcalibr": 3, "know": [522, 526, 550], "knowledg": [162, 163, 195, 480, 525, 527, 
533, 538, 540], "knowledgedistillationframework": 163, "knowledgedistillationloss": [163, 538], "knowledgedistillationlossconfig": [195, 525, 538, 543], "known": [140, 266, 396, 472, 473, 538, 541, 544, 546, 554], "kriz": 211, "kullback": 497, "kwarg": [2, 30, 90, 107, 108, 109, 110, 111, 112, 113, 114, 117, 118, 119, 120, 122, 141, 145, 171, 195, 211, 223, 225, 234, 235, 236, 238, 240, 242, 243, 244, 262, 277, 279, 281, 292, 293, 294, 297, 298, 363, 364, 365, 366, 367, 368, 369, 372, 373, 374, 375, 377, 390, 398, 399, 418, 420, 429, 431, 433, 437, 439, 441, 448, 449, 452, 462, 463, 465, 466, 523], "kwon": 544, "l": [477, 483, 521, 554], "l1": 195, "l12": 555, "l2": [195, 544], "l6": 555, "l954": 227, "l983": 227, "label": [195, 198, 199, 209, 211, 212, 213, 214, 217, 221, 225, 227, 229, 234, 235, 262, 387, 413, 449, 481, 492, 523, 537, 538, 546, 553], "label_fil": [209, 225, 553], "label_list": [209, 234], "label_map": 537, "label_shap": [213, 387], "label_shift": [221, 553], "labelbalancecocorawfilt": 217, "labelbalancecocorecordfilt": 217, "labelshift": [221, 553], "lack": [523, 529], "lake": [474, 534, 539, 545], "lambada": [475, 552], "lambada_openai": 536, "lambda": [140, 396, 547], "lamini": [475, 544, 552], "land": 545, "languag": [29, 149, 227, 413, 431, 441, 472, 473, 475, 477, 478, 480, 481, 488, 489, 490, 528, 541, 545, 547, 549, 552], "laplacian": 477, "larei": 544, "larg": [29, 30, 149, 413, 472, 473, 475, 477, 478, 480, 481, 488, 489, 523, 538, 541, 545, 547, 549, 552, 555], "larger": [152, 443, 477, 488, 496, 544, 549, 552], "lasso": [189, 533, 544], "lassounbalanc": 555, "last": [145, 169, 192, 195, 413, 472, 474, 475, 477, 523, 539, 546, 549, 552], "last_batch": [200, 202, 204, 208, 387, 523], "last_conv_or_matmul_quant": [195, 546], "latenc": [551, 554], "latency_pattern": 483, "later": [140, 149, 396, 413, 443, 471, 522], "latest": [474, 478, 494, 534, 545, 550], "latin1": [140, 396], "launch": [483, 539], "launcher": 525, "layer": [32, 101, 102, 103, 104, 106, 107, 137, 139, 141, 149, 163, 166, 171, 173, 174, 175, 179, 184, 192, 194, 195, 288, 289, 291, 391, 395, 398, 413, 420, 429, 453, 455, 466, 470, 475, 476, 478, 488, 495, 496, 497, 525, 529, 533, 544, 549, 550], "layer1": [173, 195, 538, 544, 546], "layer2": [173, 538, 544, 546], "layer3": [538, 544], "layer_1": 174, "layer_2": 174, "layer_idx": 192, "layer_initi": 295, "layer_input": 192, "layer_map": [163, 195], "layer_nam": [195, 413, 544], "layer_norm": 528, "layer_tensor": 453, "layer_wis": [394, 465, 547], "layer_wise_qu": [136, 195, 547], "layerhistogramcollector": 453, "layernorm": [55, 327, 488, 552], "layerwisequ": 139, "layout": [39, 311], "layoutlmv3": 555, "lazi": [161, 466], "lazyimport": [161, 466], "ld_library_path": 529, "lead": [195, 474, 477, 481, 488, 522, 538, 539, 544, 549, 552], "leadership": 490, "leaky_relu": 528, "leakyrelu": [40, 312, 552], "learn": [470, 473, 474, 477, 478, 481, 488, 494, 495, 496, 523, 528, 532, 533, 534, 538, 539, 541, 544, 545, 546, 552, 554], "learning_r": [195, 538], "least": [188, 491, 538, 544, 554], "leav": 101, "lee": 544, "left": [221, 225, 488, 552, 553], "legal": [494, 556], "leibler": 497, "len": [135, 195, 225, 526, 553], "length": [184, 195, 209, 225, 230, 418, 477, 488, 537, 545, 546, 549, 553], "less": [40, 133, 145, 195, 312, 391, 433, 466, 520, 538, 544], "let": [497, 530, 544], "level": [31, 156, 173, 267, 281, 463, 472, 473, 488, 490, 541, 552, 554], "levelwis": 173, "leverag": [60, 332, 462, 471, 472, 476, 479, 482, 488, 489, 528, 543, 546, 549, 
554], "lib": 529, "libgl": 529, "libgl1": 529, "libglib2": 529, "librari": [174, 226, 257, 468, 474, 481, 494, 534, 538, 539, 545, 546], "licens": 491, "lie": 521, "light": 525, "lightn": 494, "lightweight": [484, 544], "like": [59, 81, 83, 133, 140, 156, 173, 192, 195, 198, 199, 200, 211, 234, 243, 262, 331, 352, 354, 385, 387, 390, 396, 449, 452, 474, 477, 481, 488, 491, 492, 494, 495, 496, 525, 533, 543, 544, 546, 549, 550, 552, 554], "limit": [138, 266, 397, 466, 473, 477, 478, 481, 494, 536, 541, 546, 549], "lin": [477, 488, 549], "line": [496, 522, 526, 533], "linear": [30, 142, 145, 149, 171, 173, 174, 179, 184, 192, 194, 195, 403, 413, 423, 427, 429, 433, 472, 475, 476, 477, 488, 489, 492, 528, 538, 544, 549, 552, 554], "linear2linearsearch": 173, "linear_lay": 184, "linear_pattern": 174, "linearcompress": 174, "linearcompressioniter": 174, "linearli": 30, "link": [195, 209, 234, 262, 472, 478, 489, 521, 528, 549, 555], "linkedin": 545, "linux": [154, 483, 484, 489, 520, 529], "list": [1, 29, 30, 31, 39, 90, 125, 133, 135, 143, 145, 151, 152, 153, 154, 156, 173, 174, 179, 184, 188, 192, 194, 195, 198, 199, 203, 209, 221, 225, 227, 228, 230, 231, 232, 234, 235, 243, 249, 250, 253, 262, 277, 281, 283, 299, 302, 303, 305, 311, 385, 390, 398, 406, 413, 417, 418, 420, 433, 438, 439, 448, 449, 453, 458, 459, 466, 472, 475, 478, 480, 481, 492, 494, 495, 528, 530, 534, 536, 538, 542, 544, 546, 548, 552, 554], "liter": 281, "littl": 489, "llama": [475, 484, 489, 494, 536, 544, 545, 547, 549, 552], "llama2": 494, "llamanorm": 552, "llm": [125, 126, 283, 286, 420, 439, 472, 473, 475, 476, 477, 480, 488, 489, 531, 541, 544, 545, 547, 549, 552], "llm_weight_minmax": [92, 357], "lm": [477, 544, 555], "lm_head": [477, 544, 549], "lm_head_config": 477, "ln": 529, "lnl": 489, "load": [133, 138, 140, 141, 160, 209, 224, 225, 235, 243, 262, 385, 390, 395, 397, 398, 408, 411, 412, 413, 415, 417, 431, 441, 445, 448, 465, 466, 472, 481, 489, 496, 523, 529, 546, 547, 552], "load_and_cache_exampl": 209, "load_config_map": 160, "load_data_from_pkl": 466, "load_empty_model": [141, 398, 448, 477, 484, 547], "load_entri": 440, "load_huggingfac": [460, 538], "load_layer_wise_quantized_model": [141, 398], "load_modul": 398, "load_saved_model": [243, 390], "load_state_dict": [140, 396], "load_tensor": [141, 398], "load_tensor_from_shard": [141, 398], "load_valu": 398, "load_vocab": 224, "load_weight_onli": 465, "loadannot": 230, "loaded_model": [477, 489], "loader": [1, 125, 198, 199, 235, 262, 283, 301, 305, 431, 449, 462, 523, 546], "loadformat": [431, 445], "loc": [140, 396], "local": [175, 195, 431, 441, 466, 477, 479, 494, 529, 534, 544, 551], "local_config": [192, 195], "local_config_fil": [32, 288, 289], "locat": [140, 146, 192, 225, 396, 413, 476, 481, 526, 546, 550, 553], "lock": [185, 533, 543, 544], "log": [131, 151, 159, 161, 173, 249, 250, 253, 383, 393, 413, 463, 483, 492, 551, 554], "log2": [473, 541], "log_fil": [151, 256, 260], "log_interv": 526, "log_process": 161, "log_quantizable_layers_per_transform": 420, "logfile_dict": 154, "logger": [161, 453, 460, 466], "logic": [421, 425, 443, 478], "logical_cpu": 154, "login": 551, "loglevel": 554, "logo": 535, "long": [225, 477, 522, 529, 549, 553], "long_str": 522, "longer": [209, 225, 418, 484, 553], "longest": [225, 553], "look": [133, 184, 480, 495, 497, 530, 537, 552], "lookup": 173, "loop": [449, 492, 496, 497, 530, 551, 554], "loss": [29, 153, 163, 195, 234, 449, 474, 476, 477, 481, 488, 521, 525, 526, 527, 536, 537, 538, 539, 543, 544, 545, 
546, 549, 552, 554], "loss_func": [170, 188], "loss_sum": 538, "loss_typ": [163, 195, 538], "loss_weight": [163, 195, 538], "lossi": [488, 546], "lot": [488, 492, 552], "low": [30, 198, 199, 212, 213, 235, 387, 472, 474, 477, 482, 488, 489, 495, 496, 520, 521, 526, 538, 539, 545, 546, 549, 552, 554], "low_cpu_mem_usag": 418, "low_gpu_mem_usag": [418, 439, 477], "low_memory_usag": 195, "lower": [224, 225, 232, 267, 413, 471, 472, 473, 476, 481, 488, 525, 541, 544, 545, 546, 552, 553, 554], "lowerbitssampl": 277, "lowercas": 209, "lowest": [544, 554], "lp_norm": 425, "lpot": [545, 550], "lr": [195, 418, 439, 452, 477, 526, 538], "lr_schedul": [418, 439, 477, 538, 544], "lstm": 16, "lstmoper": 17, "lt": 555, "lvwerra": 555, "lwq": 477, "m": [30, 135, 177, 178, 195, 413, 433, 483, 491, 534, 544, 555], "machin": [154, 227, 477, 481, 484, 528, 534, 545, 552], "maco": 534, "made": [269, 488, 495, 546, 550, 554], "mae": [234, 537], "magnitud": [169, 191, 195, 234, 413, 533, 544], "magnitude_progress": 195, "magnitudecriterion": [169, 191], "mahonei": 544, "mai": [3, 133, 138, 140, 281, 385, 396, 397, 472, 474, 477, 478, 480, 488, 489, 490, 491, 494, 496, 521, 522, 528, 529, 535, 539, 545, 546, 549, 552, 554], "mail": 490, "main": [3, 165, 184, 188, 301, 302, 305, 420, 437, 438, 442, 443, 477, 478, 479, 481, 483, 484, 488, 492, 523, 526, 538, 544, 546, 549], "mainli": [162, 190, 488, 494, 531, 538, 544, 546], "mainstream": [470, 494], "maintain": [234, 476, 477, 481, 488, 490, 491, 522, 540, 544, 549, 550], "mainten": 495, "major": [488, 532, 546, 552], "make": [30, 90, 128, 150, 175, 180, 188, 190, 207, 380, 466, 474, 475, 477, 480, 488, 490, 494, 495, 496, 520, 522, 523, 530, 537, 538, 544, 546, 547, 548, 549, 551, 552, 554], "make_dquant_nod": 30, "make_matmul_weight_only_nod": 31, "make_modul": 1, "make_nam": 90, "make_nc_model": 1, "make_nod": 30, "make_onnx_inputs_output": 90, "make_onnx_shap": 90, "make_quant_nod": 30, "make_sub_graph": 29, "make_symbol_block": 1, "makeiter": [133, 385], "male": 466, "malici": [140, 396], "manag": [152, 449, 455, 538], "mandatori": [198, 199, 262, 538], "mani": [31, 234, 262, 433, 477, 481, 488, 522, 523, 534, 549, 554], "manipul": [87, 88], "manner": [523, 540], "manual": [211, 544], "mao": 521, "map": [1, 30, 90, 133, 140, 145, 160, 195, 225, 229, 231, 232, 234, 396, 427, 442, 457, 459, 477, 488, 494, 522, 526, 537, 538, 544, 546, 549], "map_kei": 234, "map_loc": [140, 396], "map_numpy_to_onnx_dtyp": 90, "map_onnx_to_numpy_typ": 90, "map_point": [230, 234, 537], "map_tensorflow_dtyp": 90, "mar": 545, "mark": 521, "marketplac": [494, 545], "mask": [169, 175, 177, 180, 182, 186, 187, 209, 230, 544, 555], "mask_padding_with_zero": 209, "massiv": 544, "master": [3, 177, 178, 188, 227, 228, 231, 232, 234, 262, 538, 554], "match": [63, 87, 140, 173, 230, 231, 335, 396, 406, 476, 481, 483, 494, 549], "match_datatype_pattern": 145, "math": [51, 323, 488, 546], "mathemat": [475, 480, 488, 552], "matmul": [16, 31, 38, 44, 50, 58, 79, 94, 113, 122, 179, 195, 303, 310, 316, 322, 330, 350, 359, 368, 377, 530, 549, 554], "matmul_weight_only_nod": 31, "matmulfpq4": 31, "matmulnbit": 31, "matmuloper": 18, "matric": [488, 552], "matrix": [31, 234, 262, 480, 488], "matter": [207, 548], "max": [30, 89, 128, 150, 195, 225, 231, 232, 266, 380, 413, 433, 439, 466, 473, 477, 481, 488, 494, 534, 538, 541, 544, 546, 549, 552, 553, 554], "max_answer_length": [225, 553], "max_dim": [225, 553], "max_filter_tensor": 466, "max_grad_norm": 538, "max_inclusive_opset_vers": 89, 
"max_input_chars_per_word": 224, "max_length": 209, "max_min_data": [74, 75, 346], "max_new_token": 489, "max_num_class": 230, "max_ord": 228, "max_output": 492, "max_query_length": [225, 553], "max_seq_length": [209, 225, 420, 553], "max_shard_s": 431, "max_sparsity_ratio_per_op": [175, 180, 195, 538, 544], "max_trial": [153, 195, 474, 482, 538, 554], "max_x": 413, "maxab": [439, 472], "maxabs_hw": [439, 472], "maxabs_hw_opt_weight": 472, "maxabs_pow2": 472, "maxim": [538, 544, 554], "maximum": [40, 153, 175, 180, 195, 209, 225, 228, 243, 266, 312, 390, 413, 425, 431, 477, 488, 497, 521, 538, 544, 546, 552, 553], "maxpool": [16, 114, 123, 297, 369, 378, 530], "maxpooling2d": 297, "maxpooloper": 19, "mbzuai": [475, 552], "mckinstri": 521, "md": [177, 178, 195, 234, 262], "md5": 211, "mean": [29, 31, 175, 184, 195, 221, 225, 232, 234, 271, 413, 425, 431, 441, 472, 477, 479, 488, 492, 496, 497, 523, 526, 530, 537, 538, 544, 546, 547, 549, 552, 553, 554], "mean_valu": [221, 553], "meaning": [538, 543], "meanwhil": 547, "measur": [133, 235, 385, 439, 461, 472, 477, 481, 488, 495, 520, 531, 537, 538, 549, 554], "measure_exclud": [439, 472], "mechan": [138, 169, 397, 470, 544, 551], "media": [490, 545], "median": [125, 283], "medium": [494, 545], "meet": [195, 201, 263, 265, 477, 479, 481, 482, 488, 492, 496, 542, 545, 546, 549, 554], "member": [490, 495], "memomeri": 139, "memori": [59, 145, 245, 331, 413, 433, 466, 472, 473, 474, 475, 477, 478, 480, 483, 488, 489, 521, 523, 525, 538, 539, 541, 542, 544, 546, 547, 549, 552, 554, 555], "mention": [477, 488, 544, 549], "merg": [93, 202, 278, 358, 387, 483, 554], "merge_duplicated_qdq": [91, 356], "mergeduplicatedqdqoptim": [93, 358], "mesa": 529, "messag": [90, 131, 383, 483, 491, 554], "met": [153, 482, 488, 530, 546], "meta": [81, 352, 426, 484, 489, 494, 536, 545], "meta_info": 426, "meta_op_optim": [80, 351], "metaclass": 274, "metadata": [140, 396], "metagraphdef": [243, 390], "metainfochangingmemopoptim": [81, 352], "metal": [494, 529, 534], "metaop": [81, 352], "meteor": 534, "method": [31, 126, 128, 138, 140, 145, 189, 195, 203, 207, 208, 209, 211, 218, 225, 278, 280, 286, 380, 392, 396, 397, 417, 431, 441, 455, 460, 462, 463, 472, 477, 478, 479, 481, 488, 492, 494, 521, 522, 523, 525, 537, 538, 543, 544, 546, 548, 549, 552, 553, 554], "meticul": [473, 541], "metric": [153, 162, 195, 198, 199, 226, 235, 262, 449, 455, 470, 480, 495, 496, 526, 538, 539, 546, 551, 552, 554, 555], "metric_cl": [234, 262, 538], "metric_criterion": 245, "metric_fn": [231, 232], "metric_max_over_ground_truth": [231, 232], "metric_registri": 234, "metric_typ": 234, "metric_weight": 245, "mha": [176, 183, 544], "mha_compress": 184, "mha_head_s": 184, "mha_modul": 184, "mha_nam": 184, "mha_scor": 184, "mha_spars": [171, 544], "mhacompress": 184, "microcod": 555, "microsc": 478, "microsoft": [3, 473, 489, 494, 495, 541], "middl": [488, 552], "migacz": 521, "might": [192, 474, 492, 522, 554], "migrat": [470, 475, 480, 488, 552], "mimic": [488, 546], "min": [128, 150, 195, 380, 433, 466, 473, 477, 481, 488, 541, 544, 546, 549, 552, 553, 554], "min_dim": [225, 553], "min_filter_tensor": 466, "min_max": 425, "min_sparsity_ratio_per_op": [175, 195, 538, 544], "min_train_sampl": 195, "min_x": 413, "mini": [489, 555], "minilm": [545, 555], "minim": [128, 150, 266, 380, 472, 476, 477, 481, 488, 496, 497, 521, 527, 537, 538, 544, 546, 554], "minimum": [175, 195, 413, 425, 473, 497, 521, 538, 541, 544, 552, 554], "minmax": [1, 3, 303, 409, 413, 439, 476, 496, 497, 521, 530, 
546], "minmax_file_path": 466, "minmax_lr": [418, 439, 452, 477], "minmaxcalibr": 3, "miou": 234, "misc": [138, 397, 535], "miss": [488, 544, 546], "mistral": 536, "mistralai": 536, "mitig": [477, 549], "mix": [134, 195, 235, 264, 278, 400, 437, 439, 470, 478, 494, 495, 501, 531, 533], "mix_precis": [195, 226, 281, 538, 539], "mixed_precis": [235, 394, 538], "mixed_precision_entri": 437, "mixed_precision_model": 134, "mixedprecis": [195, 538], "mixedprecisionconfig": [195, 235, 281, 437, 439, 474, 538, 539], "mixin": 452, "mixprecisionconfig": 437, "ml": 545, "mla": [495, 539, 546], "mleffici": 545, "mlp": [478, 544], "mlperf": [545, 555], "mm": 555, "mnist": [211, 526], "mnli": [209, 537, 555], "mobil": [538, 555], "mobilebert": [209, 555], "mobilenet": [528, 551, 555], "mobilenetv2": 555, "mobiusml": [439, 477], "mod": 427, "mod_dict": 439, "mode": [28, 29, 30, 95, 140, 157, 161, 218, 230, 243, 278, 360, 390, 396, 404, 413, 437, 439, 442, 448, 465, 466, 472, 477, 478, 495, 496, 533, 546, 548, 549, 554], "model": [1, 2, 3, 16, 28, 29, 30, 31, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 101, 103, 125, 126, 127, 133, 134, 135, 139, 140, 141, 143, 144, 145, 149, 151, 153, 156, 162, 170, 171, 173, 180, 181, 182, 185, 187, 188, 190, 192, 195, 198, 199, 205, 208, 209, 211, 225, 226, 234, 235, 245, 256, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 280, 281, 283, 286, 288, 301, 302, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 326, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 385, 388, 390, 391, 392, 396, 398, 406, 408, 411, 412, 413, 415, 417, 420, 431, 432, 433, 435, 437, 438, 441, 442, 448, 449, 450, 457, 458, 459, 462, 465, 466, 468, 470, 471, 472, 473, 474, 477, 478, 479, 480, 481, 484, 488, 489, 491, 492, 495, 497, 501, 520, 521, 522, 523, 525, 526, 527, 531, 532, 533, 535, 537, 539, 542, 543, 545, 546, 550, 551, 553, 554], "model_attr": 156, "model_forward": [413, 433], "model_forward_per_sampl": 413, "model_info": 439, "model_level": 156, "model_loss": 554, "model_nam": [101, 195, 494], "model_name_or_path": [209, 431, 441, 489, 494, 538, 547], "model_origin": [195, 262, 548], "model_path": [125, 133, 283, 385, 420, 439, 477], "model_proto": 90, "model_slim": [170, 184], "model_slim_ffn2": 171, "model_slim_mha": 171, "model_state_dict_path": [477, 484], "model_typ": [209, 466], "model_wis": 538, "model_wrapp": [2, 136, 388], "modelproto": [31, 235, 457, 540], "models": [195, 245, 538, 542], "modelwisetuningsampl": 277, "modern": [477, 488, 549], "modif": [491, 495, 530], "modifi": [184, 211, 261, 280, 472, 492, 497, 526, 530, 544], "modified_pickl": [137, 395], "modul": [136, 137, 155, 158, 170, 176, 183, 395, 407, 410, 414, 424, 428, 434, 447, 470, 472, 474, 477, 478, 482, 489, 492, 494, 502, 522, 523, 531, 533, 537, 538, 540, 544, 548, 549], "module_debug_level1": 281, "module_hook_config": [145, 433], "module_nam": [141, 161, 169, 170, 180, 181, 182, 183, 185, 186, 187, 189, 191, 398, 466], "module_name_list": [145, 433], "module_node_map": 459, "module_typ": 420, "module_wrapp": 400, "modulelist": 420, "mold": 153, "momentum": [169, 533, 538, 544], "momentumbalanc": 555, "momentumunbalanc": 555, "monitor": [153, 442, 478], "more": [29, 133, 149, 156, 
177, 178, 225, 385, 413, 439, 470, 472, 473, 474, 475, 477, 478, 481, 488, 489, 493, 494, 496, 521, 522, 526, 528, 533, 534, 536, 538, 539, 541, 542, 543, 544, 548, 549, 552, 554, 555], "mosaicml": [475, 552], "mose": 227, "mosesdecod": 227, "mosh": 544, "most": [195, 234, 472, 474, 477, 481, 488, 538, 539, 544, 546, 549, 552, 554, 555], "mostli": 522, "motiv": 489, "move": [63, 140, 335, 396, 413, 433, 477, 488, 492, 549, 550], "move_input_devic": 145, "move_input_to_devic": [413, 433], "move_squeeze_after_relu": [61, 333], "movesqueezeafterreluoptim": [63, 335], "mp": 481, "mpi": 554, "mpirun": 554, "mpt": [475, 544, 552], "mrpc": [209, 234, 537, 544, 554, 555], "mscoco": 230, "mse": [31, 145, 195, 234, 262, 270, 466, 477, 488, 537, 549, 550], "mse_metric_gap": 466, "mse_v2": [195, 270], "mse_v2tunestrategi": 272, "mseloss": [488, 552], "msetunestrategi": 271, "msfp": [473, 541], "msft": 545, "msg": 463, "mt": 555, "mteval": 227, "mtl": 489, "much": [162, 169, 195, 225, 488, 552, 553], "mul": [40, 50, 51, 54, 149, 312, 322, 323, 326, 477, 528, 549, 552], "mullinear": [142, 429], "multi": [151, 154, 171, 173, 184, 230, 234, 262, 483, 523, 533, 538, 542, 544, 546, 554], "multi_object": 542, "multiclass": 537, "multilabel": 537, "multilingu": 555, "multiobject": 245, "multipl": [152, 165, 171, 196, 197, 201, 202, 209, 210, 212, 213, 214, 215, 216, 220, 222, 225, 235, 236, 237, 239, 245, 481, 520, 528, 531, 538, 543, 544, 551, 552, 554], "multipli": [3, 195, 471, 473, 477, 541, 549], "must": [225, 230, 452, 491, 492, 496, 520, 523, 534, 546, 553], "mx": [1, 402, 404, 439, 473, 478, 494, 531, 541], "mx_quant": 394, "mx_quant_entri": 437, "mx_spec": [403, 404], "mxfp4": [473, 541], "mxfp6": [473, 541], "mxfp8": [473, 541], "mxint8": [473, 541], "mxlinear": 403, "mxnet": [0, 1, 3, 195, 204, 208, 211, 214, 218, 225, 234, 235, 240, 262, 495, 496, 521, 523, 527, 530, 533, 538, 539, 540, 554], "mxnet_model": 237, "mxnetcifar10": 211, "mxnetcifar100": 211, "mxnetcropresizetransform": 225, "mxnetcroptoboundingbox": 225, "mxnetdataload": 204, "mxnetdataset": 211, "mxnetfashionmnist": 211, "mxnetfilt": 218, "mxnetimagefold": 211, "mxnetimagenetraw": 214, "mxnetmetr": 234, "mxnetmnist": 211, "mxnetmodel": 240, "mxnetnormalizetransform": 225, "mxnettransform": 225, "mxnettranspos": 225, "mxquantconfig": [437, 439, 473, 541], "mxquantiz": 403, "my": 491, "mydataload": [479, 481, 482], "n": [177, 178, 195, 210, 225, 228, 281, 488, 497, 522, 536, 544, 546, 552, 553], "n_best_siz": [225, 553], "n_bit": [488, 552], "n_block": [477, 549], "n_gpu": 538, "n_iter": 266, "n_pack": 444, "n_sampl": [31, 145, 413, 452, 477], "n_warmup": 266, "na": [195, 239, 481, 533, 555], "name": [1, 30, 39, 52, 53, 55, 89, 90, 95, 101, 125, 133, 135, 140, 141, 144, 145, 146, 151, 152, 153, 156, 160, 166, 169, 173, 175, 180, 183, 184, 188, 189, 190, 191, 192, 194, 195, 209, 210, 211, 214, 218, 223, 225, 229, 234, 239, 243, 245, 262, 278, 280, 283, 292, 293, 297, 311, 324, 325, 327, 360, 385, 390, 391, 396, 398, 403, 412, 413, 417, 418, 420, 427, 433, 439, 442, 443, 446, 448, 453, 455, 457, 458, 459, 466, 472, 473, 476, 477, 478, 479, 491, 492, 497, 522, 526, 528, 530, 535, 537, 538, 540, 541, 544, 546, 550, 551, 554, 555], "namecollector": 1, "named_paramet": 526, "namespac": 522, "namhoon": 544, "nan": [41, 313], "narrow": [473, 541], "narrow_rang": 98, "nasconfig": 195, "nation": [211, 490], "nativ": 497, "natur": [227, 477, 528], "nbest_predict": [225, 553], "nbit": 426, "nblock": [418, 439], "nbsp": 554, "nc": [551, 554], 
"nc_model": 1, "nc_resnet50_v1": 526, "nc_workspac": 195, "nchw": [39, 221, 311], "ncmodel": 1, "ndarrai": [1, 29, 30, 52, 53, 55, 225, 324, 325, 327, 444, 453, 529, 553], "ndarray_to_devic": 1, "nearest": [173, 225, 439, 477, 478, 488, 549, 553], "nearst": 31, "necessari": [30, 188, 490, 496, 540, 544, 551, 554], "necessarili": [138, 397], "need": [1, 29, 90, 94, 151, 156, 173, 188, 195, 198, 199, 200, 207, 208, 211, 218, 225, 234, 235, 245, 262, 271, 359, 387, 404, 411, 413, 431, 441, 449, 459, 465, 466, 477, 480, 482, 484, 488, 489, 492, 495, 522, 523, 526, 529, 533, 538, 544, 546, 547, 548, 549, 550, 552, 553, 554], "need_appli": [305, 442], "need_spac": 413, "neelnanda": [418, 452], "neither": 546, "neo": 531, "neox": 536, "nepoch": 538, "nest": [145, 391, 417, 466], "nesterov": 538, "net": [241, 391, 494], "netflix": 545, "nets_factori": 237, "network": [135, 169, 175, 176, 269, 439, 473, 474, 488, 521, 525, 528, 538, 541, 545, 546, 552, 554], "neural": [1, 135, 151, 154, 162, 165, 175, 176, 177, 178, 188, 195, 221, 222, 225, 226, 233, 234, 235, 239, 246, 262, 269, 270, 276, 290, 302, 303, 304, 305, 389, 392, 394, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 450, 452, 456, 468, 470, 472, 473, 474, 475, 476, 477, 478, 479, 481, 482, 483, 485, 488, 489, 491, 495, 496, 497, 520, 521, 522, 524, 525, 526, 527, 528, 529, 530, 531, 532, 535, 536, 539, 540, 541, 542, 543, 545, 546, 549, 550, 552, 553, 554, 555], "neural_compressor": [471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 489, 492, 494, 497, 520, 522, 523, 525, 526, 528, 531, 532, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 550, 551, 552, 553, 554], "neurip": 545, "neuron": 544, "never": [140, 396, 554], "nevertheless": [473, 541], "new": [1, 3, 31, 133, 151, 200, 211, 225, 245, 385, 387, 392, 413, 433, 443, 452, 470, 474, 475, 476, 477, 488, 491, 496, 526, 538, 539, 544, 545, 546, 549, 550, 551, 553], "new_api": [33, 34, 56, 57, 62, 64, 77, 84, 116, 121, 128, 306, 328, 329, 334, 336, 348, 371, 376, 380], "new_dtyp": 30, "new_func": [133, 385], "new_graph_def": [133, 385], "new_in_featur": 444, "new_init": 31, "new_metr": 537, "new_modul": [141, 145, 398, 413, 433, 448], "new_quantized_nam": 30, "newapi": [79, 350], "newdataload": 523, "newli": [538, 544], "newlin": [232, 466], "newmetr": 537, "next": [45, 203, 281, 317, 387, 477, 488, 494, 496, 497, 523, 549, 554], "next_annotation_id": 230, "next_tune_cfg": 554, "nextplatform": 545, "nf4": [433, 477, 549], "nfl": 232, "ngram": [227, 537], "nhwc": [39, 221, 311], "ni_workload_nam": 195, "ninm": 176, "nll_loss": 526, "nlp": [188, 195, 474, 478, 481, 488, 544, 546], "nn": [141, 142, 145, 173, 174, 184, 194, 195, 235, 262, 398, 408, 412, 413, 417, 420, 427, 429, 431, 433, 435, 437, 438, 441, 442, 448, 459, 462, 465, 472, 477, 478, 488, 492, 538, 540, 549, 552], "nncf": 135, "no_absorb_lay": [145, 433], "node": [1, 29, 30, 31, 35, 39, 40, 41, 42, 45, 47, 49, 52, 53, 55, 59, 62, 65, 67, 68, 69, 83, 87, 88, 90, 95, 117, 125, 133, 154, 173, 243, 261, 283, 307, 311, 312, 313, 314, 317, 319, 321, 324, 325, 327, 331, 334, 337, 339, 340, 341, 354, 360, 372, 385, 390, 406, 413, 433, 443, 457, 459, 483, 495, 526, 547, 554, 555], "node1": 526, "node2": 526, "node_candidate_list": 406, "node_collector": 135, "node_def": [52, 53, 55, 324, 325, 327], "node_from_map": [52, 53, 55, 324, 325, 327], "node_index": 154, "node_list": 406, "node_map": [52, 53, 55, 324, 325, 327], "node_nam": [52, 53, 55, 133, 243, 251, 324, 325, 327, 390, 496], "node_name_from_input": 
[52, 53, 55, 324, 325, 327], "node_name_list": [131, 383], "node_op": 496, "node_set_from_user_config": 406, "nodedef": [52, 53, 55, 324, 325, 327], "non": [3, 466, 472, 474, 477, 492, 549, 554], "nondigit_punct_r": 227, "none": [1, 28, 30, 31, 32, 33, 34, 48, 74, 87, 89, 90, 95, 101, 127, 133, 135, 139, 140, 141, 142, 145, 151, 152, 153, 156, 161, 162, 163, 169, 170, 171, 173, 177, 188, 192, 195, 198, 199, 200, 202, 204, 207, 209, 210, 211, 212, 213, 214, 216, 221, 225, 230, 234, 235, 245, 256, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 278, 281, 284, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 320, 346, 360, 385, 387, 391, 392, 396, 398, 403, 404, 405, 413, 417, 418, 420, 423, 426, 429, 431, 432, 433, 435, 438, 439, 441, 442, 448, 449, 452, 453, 455, 458, 459, 462, 465, 466, 472, 473, 477, 478, 481, 483, 489, 494, 495, 496, 523, 537, 541, 546, 549, 553, 554], "nor": 546, "norm": [16, 477], "normal": [3, 20, 225, 232, 477, 488, 538, 542, 549, 552, 553], "normalfloat": [477, 549], "normalizationoper": 20, "normalize_answ": 232, "normalizetftransform": 225, "normalizetransform": 225, "not_use_best_ms": [418, 439, 477], "notat": [195, 466], "note": [40, 138, 153, 179, 230, 272, 312, 392, 397, 470, 471, 472, 474, 475, 476, 477, 480, 483, 488, 489, 494, 496, 497, 522, 523, 526, 528, 530, 531, 534, 536, 538, 544, 546, 549, 552, 554], "notebook": 470, "noteworthi": 521, "noth": [230, 554], "notic": [128, 150, 380, 474, 477, 535, 539, 550], "notimplementederror": 448, "nov": 545, "novel": 525, "now": [195, 225, 489, 492, 497, 526, 553, 554], "np": [30, 225, 266, 526, 552, 553, 554], "np_dtype": 90, "npu": [195, 546], "npy": 210, "npy_dir": 210, "npz": 211, "nr": 90, "nsampl": [418, 420, 439, 549], "nsdf3": 211, "nuanc": 477, "num": [433, 466, 489], "num_beam": 489, "num_bin": [1, 3, 453], "num_bit": [31, 98, 142, 145, 413, 429, 488, 552], "num_c": 483, "num_class": 234, "num_cor": [209, 210, 214], "num_cores_on_numa": 483, "num_cores_per_inst": [154, 483], "num_correct": 234, "num_cpu": 154, "num_detect": [230, 234, 537, 538], "num_gt_box": 230, "num_i": 483, "num_inst": [154, 483], "num_of_inst": [151, 195, 520, 538], "num_of_process": 526, "num_quantized_bin": 3, "num_replica": 526, "num_sampl": 234, "num_train_epoch": [538, 544], "num_work": [200, 202, 204, 387, 523, 538, 546], "numa": [154, 483], "numa_index": 154, "numa_info": 154, "numa_node_index": 154, "numactl": [151, 154, 529], "numba": 444, "number": [3, 31, 52, 53, 90, 125, 145, 152, 153, 161, 171, 177, 178, 195, 207, 210, 221, 225, 231, 232, 234, 257, 261, 266, 281, 283, 324, 325, 387, 413, 418, 426, 433, 443, 455, 466, 471, 477, 482, 483, 488, 520, 523, 526, 537, 544, 546, 549, 551, 552, 553, 554], "number_of_process": 554, "numer": [195, 234, 473, 474, 477, 481, 497, 539, 541, 545, 546, 549], "numpi": [29, 30, 52, 53, 55, 90, 133, 225, 230, 324, 325, 327, 444, 529, 552, 553], "nvidia": [494, 521, 533, 534, 555], "nxm": [176, 186, 195, 544], "o": [140, 281, 396, 494, 522, 534], "o0": [267, 554], "o1": 554, "obj": [1, 90, 170, 192, 208, 235, 245, 262, 449, 455, 466], "obj1": 466, "obj2": 466, "obj_cl": 245, "obj_criterion": 245, "obj_weight": 245, "object": [1, 29, 30, 31, 59, 71, 90, 101, 117, 133, 134, 135, 138, 140, 144, 145, 151, 153, 160, 162, 169, 170, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 195, 198, 199, 200, 209, 210, 211, 225, 226, 230, 234, 235, 243, 244, 257, 261, 262, 266, 280, 331, 343, 372, 385, 387, 390, 391, 396, 
397, 399, 406, 409, 411, 413, 417, 433, 448, 449, 452, 455, 465, 466, 470, 476, 478, 481, 492, 496, 501, 520, 522, 523, 529, 537, 538, 540, 543, 544, 546, 553, 554, 555], "object_detect": [195, 230], "objective_cfg": 245, "objective_cl": 245, "objective_custom_registri": 245, "objective_registri": 245, "oblig": 490, "observ": [145, 417, 439, 442, 471, 472, 476, 477, 478, 497, 549, 551, 554], "obstacl": [473, 541], "obtain": [171, 173, 182, 187, 189, 192, 488, 492, 496, 544, 551, 552, 554], "occupi": [473, 541], "occur": 476, "ocp": [473, 541], "oct": 545, "off": [3, 52, 53, 55, 324, 325, 327, 477, 488, 491, 549], "offens": 490, "offer": [473, 477, 481, 541], "offici": [133, 227, 231, 232, 385, 490, 552], "offlin": [466, 475, 480, 481, 488, 490, 538, 546, 552], "offset_height": [225, 553], "offset_width": [225, 553], "ofir": 544, "often": [192, 477, 523, 543, 544], "old": [391, 466, 475, 538, 550], "old_hist": [391, 466], "oliv": [494, 545], "omit": [472, 549], "omp": 489, "omp_num_thread": 484, "ompi_mca_btl_vader_single_copy_mechan": 494, "on_after_compute_loss": [449, 525, 538, 543], "on_after_optimizer_step": [538, 544], "on_before_optimizer_step": [449, 525, 538, 543, 544], "on_epoch_begin": [449, 455, 525, 538, 543], "on_epoch_end": [449, 455, 525, 538, 543], "on_step_begin": [449, 455, 525, 538, 543, 544], "on_step_end": [449, 455, 525, 538, 543], "on_train_begin": [449, 492, 525, 538, 543, 544, 546], "on_train_end": [449, 525, 538, 543, 544, 546], "onc": [133, 140, 165, 190, 263, 265, 385, 396, 481, 489, 496, 497, 523, 544, 545, 554, 555], "one": [31, 94, 95, 100, 140, 145, 151, 175, 179, 184, 187, 188, 190, 225, 227, 230, 234, 359, 360, 396, 413, 417, 433, 474, 477, 478, 481, 482, 483, 488, 495, 521, 525, 526, 530, 533, 534, 537, 538, 539, 542, 543, 544, 546, 547, 549, 550, 552, 553, 554], "oneapi": [470, 474, 489, 534, 545], "onednn": [474, 481, 539, 546], "onednn_max_cpu_isa": 474, "ones": [140, 230, 396, 544], "oneshotschedul": 190, "onli": [29, 31, 39, 48, 55, 71, 94, 100, 128, 140, 149, 150, 151, 165, 170, 176, 183, 195, 209, 262, 272, 280, 281, 311, 320, 327, 343, 359, 380, 396, 413, 428, 429, 431, 432, 433, 439, 441, 459, 466, 470, 472, 474, 478, 482, 483, 489, 495, 496, 497, 520, 521, 522, 523, 526, 529, 530, 531, 536, 538, 539, 544, 545, 546, 547, 552, 554], "onlin": [477, 490, 551], "onnx": [2, 3, 16, 28, 30, 31, 72, 127, 149, 195, 205, 235, 242, 257, 457, 458, 459, 464, 491, 494, 495, 496, 498, 523, 527, 528, 530, 533, 539, 540, 541, 545, 549, 552, 554], "onnx_graph": 86, "onnx_ml_pb2": [235, 540], "onnx_model": [237, 256], "onnx_nod": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 86], "onnx_qlinear_to_qdq": 457, "onnx_quant": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27], "onnx_schema": 86, "onnx_typ": 90, "onnxbilinearimagenettransform": 221, "onnxcommunitymeetup2023": 545, "onnxgraph": 87, "onnxmodel": [31, 242, 256], "onnxnod": 88, "onnxopschema": 89, "onnxprofilingpars": 249, "onnxqlinear2qdq": 195, "onnxqlinear2qdqconfig": 195, "onnxresizecropimagenettransform": 221, "onnxrt": [29, 30, 31, 209, 211, 218, 225, 234, 256, 464, 495, 539, 546, 555], "onnxrt_cuda_ep": [195, 539, 546], "onnxrt_dataload": 256, "onnxrt_dml_ep": [195, 546], "onnxrt_dnnl_ep": [195, 539, 546], "onnxrt_integ": 538, "onnxrt_integerop": [211, 218, 225], "onnxrt_qdq": [211, 218], "onnxrt_qlinear": 538, "onnxrt_qlinearop": [211, 218, 225], "onnxrt_qlinearopsadaptor": 495, "onnxrt_trt_ep": [195, 539, 546], "onnxrtaug": 2, 
"onnxrtbertdataload": 205, "onnxrtbertdataset": 209, "onnxrtcroptoboundingbox": 225, "onnxrtdataload": [205, 256], "onnxrtglu": 234, "onnxrtimagenetdataset": 214, "onnxrtitdataset": 211, "onnxrtitfilt": 218, "onnxrtitmetr": 234, "onnxrtittransform": 225, "onnxrtparserfactori": 248, "onnxrtqldataset": 211, "onnxrtqlfilt": 218, "onnxrtqlmetr": 234, "onnxrtqltransform": 225, "onnxruntim": [3, 195, 205, 211, 218, 257, 495, 521, 523, 533, 534, 539], "onto": [140, 396], "op": [1, 16, 36, 38, 50, 51, 52, 53, 54, 55, 56, 57, 59, 63, 66, 70, 73, 76, 77, 78, 79, 81, 90, 92, 94, 108, 109, 110, 116, 118, 119, 120, 121, 126, 128, 131, 133, 135, 144, 145, 149, 173, 195, 257, 261, 267, 269, 271, 278, 279, 280, 286, 308, 310, 322, 323, 324, 325, 326, 327, 328, 329, 331, 335, 338, 342, 345, 347, 348, 349, 350, 352, 357, 359, 363, 364, 365, 371, 373, 374, 375, 376, 380, 383, 385, 412, 413, 417, 433, 437, 448, 457, 459, 461, 466, 474, 475, 478, 488, 489, 495, 496, 497, 530, 538, 539, 544, 546, 548, 550, 552, 554], "op_block_lst": 277, "op_cfg": 139, "op_defin": 251, "op_dict": 538, "op_dtyp": 277, "op_dtype_dict": 277, "op_infos_from_cfg": [145, 412, 413, 417], "op_level": 156, "op_list": 495, "op_nam": [145, 195, 279, 413, 417, 433, 448, 466, 467, 476, 496, 538, 544], "op_name_dict": [195, 476, 538, 546, 554], "op_name_or_module_typ": [152, 299, 303, 439], "op_name_typ": 278, "op_quant_mod": 279, "op_registri": 21, "op_run": 251, "op_typ": [21, 125, 126, 133, 194, 279, 283, 286, 303, 413, 417, 475, 476], "op_type_dict": [195, 476, 497, 546, 549, 554], "op_type_level": 156, "op_types_to_quant": 28, "op_user_cfg": 280, "op_user_cfg_modifi": 280, "op_wis": 538, "op_wise_config": [92, 116, 121, 357, 371, 376, 496], "op_wise_sequ": [116, 121, 371, 376], "open": [140, 226, 396, 455, 468, 490, 494, 528, 529, 538, 545], "openai": [475, 552], "opencv": 529, "opentri": 466, "openvinotoolkit": 135, "oper": [4, 30, 89, 95, 125, 133, 152, 156, 173, 174, 179, 195, 221, 257, 261, 283, 299, 360, 385, 406, 413, 471, 472, 474, 475, 476, 477, 478, 479, 480, 481, 488, 489, 492, 520, 523, 528, 544, 546, 549, 552, 553, 554], "operator_name_or_list": 478, "operatorconfig": [299, 439], "ops_lst": [145, 417], "ops_nam": [145, 417], "opset": [29, 87, 89, 90, 195, 458, 459, 528], "opset_vers": [90, 127, 195, 458, 459, 528], "opt": [188, 475, 488, 489, 494, 536, 544, 552, 554, 555], "opt_cfg": [60, 332], "opt_model": [471, 476, 538], "opt_param": 425, "optdecoderlay": 552, "optim": [39, 59, 60, 64, 65, 101, 103, 133, 164, 167, 168, 170, 173, 182, 187, 195, 266, 311, 331, 332, 336, 337, 385, 424, 439, 449, 451, 453, 466, 470, 476, 477, 480, 481, 482, 484, 488, 489, 494, 520, 522, 525, 526, 530, 533, 534, 536, 538, 540, 545, 546, 548, 549, 551, 552, 554], "optimize_lay": 102, "optimize_qdq": [115, 370], "optimize_transform": 489, "optimize_weights_proximal_legaci": 425, "optimized_model_tensor": 466, "optimized_tensor_data": 467, "optimizedmodel": 462, "optimizeqdqgraph": [116, 371], "optimizer_registri": 165, "optimizer_typ": 165, "optimum": [472, 549], "option": [3, 31, 90, 140, 145, 156, 161, 175, 195, 198, 199, 209, 225, 230, 234, 235, 262, 278, 280, 281, 391, 392, 396, 398, 408, 409, 413, 415, 418, 425, 431, 433, 435, 437, 438, 439, 441, 442, 448, 449, 458, 459, 460, 462, 466, 477, 478, 481, 488, 496, 497, 522, 523, 530, 534, 538, 544, 546, 549, 551, 552, 553, 554], "optuningconfig": [277, 278, 279], "optyp": [1, 195, 457, 496], "optype_wise_": 496, "optypes_to_exclude_output_qu": [28, 195, 546], "optypewis": 496, 
"optypewisetuningsampl": 277, "opwis": 496, "opwisetuningsampl": 277, "orchestr": [449, 470, 533], "order": [139, 145, 153, 227, 228, 266, 271, 280, 477, 482, 488, 489, 492, 537, 549, 552, 554], "ordered_op": 145, "ordereddefaultdict": 280, "ordereddict": [403, 412, 416, 419, 430, 522], "ordinari": 551, "org": [169, 187, 230, 420, 439, 492, 494, 534, 544], "orient": 490, "orig_answer_text": 225, "orig_bit": 444, "orig_lay": [142, 429], "orig_model": 477, "orig_sav": 478, "orig_text": 225, "origin": [30, 31, 125, 133, 141, 145, 173, 185, 192, 195, 209, 225, 267, 280, 283, 385, 398, 413, 427, 431, 433, 441, 442, 462, 466, 477, 478, 479, 481, 488, 491, 544, 546, 549, 553, 554], "original_model": [431, 441, 477], "ort": 257, "ortsmoothqu": 29, "other": [52, 53, 55, 138, 149, 208, 232, 243, 324, 325, 327, 390, 397, 413, 473, 478, 481, 488, 490, 492, 496, 497, 502, 522, 530, 531, 533, 535, 538, 540, 541, 542, 544, 546, 552, 553, 554, 555], "otherwis": [140, 211, 225, 396, 406, 413, 427, 452, 466, 477, 490, 496, 549, 553], "ouput_dir": 547, "our": [128, 145, 195, 380, 413, 473, 489, 494, 528, 538, 541, 551], "out": [178, 195, 209, 210, 211, 412, 413, 477, 479, 481, 488, 491, 492, 494, 544, 546, 549], "out_dtyp": 439, "out_featur": [403, 423, 429], "out_graph_def": [133, 385], "out_graph_fil": [133, 385], "outcom": 234, "outer": [202, 387], "outlier": [125, 283, 475, 477, 480, 488, 496, 521, 549, 552], "outlin": [496, 497], "outofcheeseerror": 522, "outperform": 477, "output": [29, 30, 31, 36, 39, 44, 59, 83, 90, 95, 133, 145, 166, 173, 174, 179, 192, 195, 198, 199, 227, 234, 235, 243, 262, 308, 311, 316, 331, 354, 360, 385, 390, 391, 408, 413, 415, 417, 425, 431, 433, 439, 442, 449, 458, 459, 462, 463, 466, 472, 477, 478, 479, 488, 489, 492, 495, 496, 525, 526, 528, 530, 532, 537, 538, 540, 543, 544, 546, 549, 552, 553, 554], "output_data": 29, "output_dict": 31, "output_dir": [139, 408, 415, 431, 462, 478, 538], "output_fn": 225, "output_func": [145, 433], "output_graph": 195, "output_graph_def": 480, "output_handl": [161, 466], "output_index_map": [234, 537], "output_mod": 209, "output_model": [281, 538], "output_nam": [87, 127, 195, 243, 390, 458, 459, 528], "output_node_nam": [68, 69, 116, 117, 121, 133, 340, 341, 371, 372, 376, 385], "output_path": 230, "output_process": 166, "output_shap": 87, "output_tensor": [133, 243, 385, 390], "output_tensor_id_op_nam": [412, 413, 417], "output_tensor_ids_op_nam": [145, 413, 417], "output_tensor_nam": [133, 243, 385, 390], "output_valu": [145, 433], "outputs_to_valu": 90, "over": [90, 140, 211, 234, 396, 496, 527, 533, 537, 544, 552, 554], "overal": [477, 497, 549], "overflow": [488, 546], "overhead": [477, 552], "overli": 522, "overrid": [195, 466, 478, 489, 522], "overridden": 554, "overview": [494, 531, 554], "overwrit": 211, "overwrite_exist": 151, "own": [59, 140, 207, 225, 245, 331, 396, 470, 488, 496, 523, 533, 537, 542, 549, 551], "p": [3, 477, 484, 488, 489, 549], "p_conf": [538, 543], "pack": [30, 46, 318, 421, 426, 444], "pack_array_with_numba_b2_c16": 444, "pack_array_with_numba_b2_c32": 444, "pack_array_with_numba_b2_c64": 444, "pack_array_with_numba_b2_c8": 444, "pack_array_with_numba_b4_c16": 444, "pack_array_with_numba_b4_c32": 444, "pack_array_with_numba_b4_c64": 444, "pack_array_with_numba_b4_c8": 444, "pack_array_with_numba_b8_c16": 444, "pack_array_with_numba_b8_c32": 444, "pack_array_with_numba_b8_c64": 444, "pack_array_with_numba_b8_c8": 444, "packag": [89, 446, 494, 495, 522, 529, 534, 545, 550, 554], "package_nam": 446, 
"packed_arrai": 444, "packer": 421, "pad": [16, 31, 56, 57, 209, 225, 292, 294, 297, 298, 328, 329, 553], "pad_max_length": 549, "pad_tensor": 31, "pad_token": 209, "pad_token_segment_id": 209, "paddedcentercroptransform": 225, "padding_mod": 553, "pade": 31, "padoper": 22, "page": [490, 491], "pager": 522, "pain": [488, 546], "pair": [92, 195, 357, 406, 474, 476, 496, 523, 538, 539], "paper": [475, 477, 488, 544, 549, 552], "paragraph": [231, 232], "parallel": [210, 257, 261, 538, 554], "param": [145, 151, 154, 156, 165, 200, 234, 266, 387, 466, 472, 495, 540, 554], "param1": 281, "param2": 281, "param3": 281, "param_alia": 466, "param_dict": [163, 165], "param_nam": [398, 466], "paramet": [1, 3, 29, 30, 31, 52, 53, 55, 71, 90, 101, 117, 125, 126, 133, 134, 135, 140, 141, 144, 145, 146, 151, 152, 153, 154, 156, 160, 161, 162, 163, 165, 167, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 208, 209, 211, 218, 221, 225, 227, 228, 230, 231, 232, 234, 235, 239, 243, 245, 257, 261, 262, 266, 274, 278, 280, 281, 283, 286, 301, 305, 324, 325, 327, 343, 372, 385, 390, 391, 396, 398, 404, 406, 408, 409, 411, 412, 413, 415, 417, 418, 420, 422, 425, 427, 431, 433, 435, 437, 438, 439, 441, 442, 443, 446, 448, 449, 455, 457, 458, 459, 462, 463, 465, 466, 473, 475, 476, 477, 480, 481, 483, 488, 492, 496, 523, 526, 533, 537, 538, 541, 544, 546, 547, 549, 551, 552, 553, 554], "parameter": 544, "parameter1": 281, "parameter2": 281, "paramlevel": 156, "params_list": [152, 156], "parent": [185, 186, 190, 413, 433], "pars": [90, 133, 145, 154, 209, 210, 221, 225, 249, 250, 253, 385, 390, 412, 413, 417, 496, 497, 553], "parse_auto_slim_config": [171, 544], "parse_cfg": 417, "parse_last_linear": 192, "parse_last_linear_tf": 192, "parse_saved_model": [133, 385], "parse_str2list": 154, "parse_to_prun": 192, "parse_to_prune_tf": 192, "parse_tune_config": 1, "parse_valid_pruner_typ": 183, "parsedecodebert": 209, "parsedecodecoco": 210, "parsedecodeimagenet": [221, 553], "parsedecodeimagenettransform": 221, "parsedecodevoctransform": 225, "parserfactori": 247, "part": [145, 171, 180, 433, 470, 474, 488, 492, 534, 539, 544, 547, 552], "parti": [230, 488, 491, 535, 546], "partial": [184, 481, 544, 546], "particip": 490, "particular": [489, 494, 544, 549], "particularli": 481, "partit": [526, 544], "partner": 545, "parzen": 554, "pascal": 211, "paser_cfg": 145, "pass": [62, 133, 138, 140, 151, 161, 234, 262, 334, 385, 391, 396, 397, 411, 413, 431, 441, 465, 466, 477, 488, 491, 492, 495, 520, 525, 526, 532, 537, 538, 544, 546, 548, 552], "past": [477, 488, 495, 549], "pat": 545, "patch": 427, "patch_hqq_moduil": 427, "path": [90, 125, 133, 141, 145, 160, 173, 188, 192, 209, 210, 211, 225, 235, 243, 261, 262, 278, 283, 385, 390, 398, 405, 412, 413, 415, 416, 417, 431, 442, 458, 459, 462, 466, 472, 476, 477, 478, 484, 492, 495, 526, 532, 537, 538, 539, 540, 543, 549, 553], "path_to_sav": 449, "pathlik": [140, 396], "pattern": [43, 44, 46, 63, 81, 92, 93, 94, 116, 121, 145, 169, 170, 173, 174, 180, 181, 182, 183, 185, 187, 189, 195, 211, 278, 315, 316, 318, 335, 352, 357, 358, 359, 371, 376, 406, 471, 476, 480, 483, 495, 522, 530, 533, 538, 543, 552, 554, 555], "pattern_analyz": 172, "pattern_detector": 136, "pattern_factori": 406, "pattern_lock": [183, 195], "pattern_lst": [143, 417], "pattern_pair": 406, "pattern_to_intern": 278, "pattern_to_path": 278, "patternmha": 177, "patternpair": 406, "pb": [151, 195, 235, 243, 262, 390, 481, 
520, 526, 538, 540, 555], "pbound": 266, "pc": 491, "pdf": 3, "peak": [245, 483, 542], "pegasu": 555, "peleenet": 555, "penal": 544, "penalti": [227, 228, 537], "pend": 529, "pentium": 535, "peopl": [488, 546], "pep": [281, 522], "per": [29, 30, 31, 149, 151, 195, 398, 413, 433, 437, 461, 472, 473, 475, 477, 483, 495, 497, 523, 541, 544, 548, 549, 555], "per_channel": [98, 409, 439, 496, 497, 530, 546], "per_channel_symmetr": 497, "per_tensor": [292, 293, 294, 297, 298, 299, 303, 409, 439, 479, 496, 497, 530, 546], "per_tensor_symmetr": 497, "percdamp": [31, 439, 477, 549], "percent": 31, "percentag": [232, 477, 521, 549], "percentil": [3, 31, 125, 283, 303, 433, 521], "percentilecalibr": 3, "perceptron": 544, "perchannel": 31, "perform": [81, 125, 151, 189, 195, 209, 221, 234, 245, 262, 267, 271, 283, 284, 352, 413, 471, 472, 474, 476, 477, 478, 479, 481, 482, 483, 484, 488, 489, 491, 492, 494, 495, 496, 520, 524, 525, 527, 528, 531, 533, 534, 537, 538, 539, 540, 542, 543, 544, 545, 546, 547, 548, 549, 552, 554, 555], "performance_onli": [32, 33, 34, 92, 116, 121, 132, 289, 306, 357, 371, 376, 384, 538], "perm": [225, 553], "perman": [171, 490, 544], "permiss": 490, "permut": [225, 553], "persist": 540, "person": [391, 466, 490, 545], "perspect": 554, "phase": [146, 448, 481, 488, 538, 544, 546, 548, 554], "phi": [489, 535], "philip": 544, "philosophi": [476, 481, 546], "physic": [151, 154, 490, 520], "physical_cpu": 154, "pickl": [138, 140, 396, 397], "pickle_load_arg": [140, 396], "pickle_modul": [140, 170, 396], "pickle_protocol": 170, "pickleerror": [138, 397], "pickler": [138, 397], "pickletool": [138, 397], "picklingerror": [138, 397], "piec": [224, 227, 477, 488, 537, 549], "pil": [225, 553], "pile": [418, 452], "pin": 523, "pin_memori": [200, 202, 204, 387, 523], "ping_memori": [538, 546], "pip": [494, 526, 529, 531, 534, 550], "pipe": 522, "pipelin": [153, 162, 491, 525, 538, 543], "pixel": 553, "pkl": 466, "pl": 227, "place": [412, 413, 433, 442, 478, 525, 534, 544, 554], "placehold": [42, 207, 314, 387, 409, 522], "placeholder_dtyp": 173, "placeholder_shap": 173, "plai": [236, 390, 477, 488, 545, 549, 552], "plan": [478, 533], "platform": [466, 483, 488, 533, 545, 546], "platinum": 555, "pleas": [29, 135, 149, 169, 177, 178, 179, 187, 188, 195, 209, 210, 211, 214, 234, 262, 281, 413, 420, 470, 472, 474, 475, 476, 477, 478, 479, 480, 488, 489, 492, 493, 494, 495, 520, 521, 525, 526, 528, 534, 536, 537, 539, 544, 546, 548, 549, 550, 551, 552, 554], "plu": 539, "plug": 545, "png": 211, "point": [30, 31, 221, 231, 232, 266, 425, 433, 466, 471, 472, 473, 474, 475, 476, 477, 480, 488, 537, 541, 546, 549, 552, 553, 554], "pointwise_constraint": 298, "pointwise_initi": 298, "pointwise_regular": 298, "polici": [263, 265, 271, 482, 490, 494, 556], "polit": 490, "pollut": 522, "pont": 534, "pool": 16, "pool2d": 295, "pool_siz": 297, "pooloper": 23, "poor": 478, "popen": 522, "popular": [226, 468, 470, 478, 482, 488, 494, 495, 496, 521, 527, 528, 536, 538, 544, 546, 549, 554], "popularli": 537, "port": [52, 53, 55, 324, 325, 327], "portabl": [138, 397], "portion": 209, "pose": [473, 541, 547], "posit": [225, 442, 490, 537], "possibl": [140, 396, 406, 472, 543, 544, 547, 548, 554], "post": [29, 82, 83, 149, 187, 195, 198, 199, 262, 353, 354, 413, 420, 439, 473, 475, 476, 477, 478, 480, 482, 488, 490, 494, 495, 496, 525, 526, 528, 533, 534, 541, 544, 545, 547, 549, 552, 554], "post_batch": 1, "post_hostconst_convert": [80, 351], "post_node_nam": [62, 334], "post_quantized_op_cs": [80, 351], 
"post_training_auto_qu": 554, "post_training_dynamic_qu": [538, 554], "post_training_static_qu": [538, 554], "postcompressionutil": 174, "postcseoptim": [83, 354], "posterior": 554, "postfix": [74, 75, 346], "posthostconstconvert": [82, 353], "postposttrainingquantconfig": 548, "postprocess": [195, 222, 225, 234, 262, 455, 495, 538, 553], "postprocess_cfg": 455, "postprocess_cl": [223, 538], "postprocess_model": 448, "posttrainingquantconfig": [195, 262, 492, 497, 523, 528, 537, 538, 540, 546, 547, 548, 549, 551, 552, 554], "power": [472, 473, 481, 538, 541, 545], "pp": 544, "pr": [491, 523, 537, 550], "practic": [473, 541], "pre": [64, 101, 141, 173, 198, 199, 209, 235, 262, 336, 398, 439, 449, 476, 477, 482, 488, 522, 525, 538, 544, 545, 546, 549, 554], "pre_batch": 1, "pre_node_nam": [62, 334], "pre_optim": [61, 333], "pre_post_process_quant": [195, 546], "pre_process": 526, "preced": [227, 488, 552, 554], "precis": [134, 195, 198, 199, 232, 234, 235, 245, 264, 267, 278, 289, 399, 400, 401, 406, 437, 439, 466, 470, 472, 473, 476, 477, 478, 482, 488, 494, 495, 496, 501, 520, 521, 525, 526, 530, 531, 533, 541, 545, 546, 549, 552, 554], "pred": [133, 234, 385, 537], "pred_list": 234, "pred_text": 225, "predefin": 482, "predict": [133, 225, 227, 231, 232, 234, 262, 385, 477, 488, 537, 549, 552, 553], "prefer": [151, 262, 477, 488, 544, 549], "prefix": [133, 140, 141, 144, 145, 151, 154, 385, 396, 398, 433, 472], "preoptim": [64, 336], "prepar": [1, 31, 145, 392, 412, 415, 431, 437, 442, 448, 471, 472, 473, 475, 476, 477, 478, 481, 484, 488, 492, 494, 526, 531, 544, 546], "prepare_compress": [195, 449, 492, 525, 538, 543, 544, 546], "prepare_dataload": 1, "prepare_input": 31, "prepare_model": 1, "prepare_model_data": 1, "prepare_prun": [170, 544], "prepared_model": [471, 475, 476, 477, 484, 531], "preprint": [473, 477, 488, 521, 541, 544, 549, 552], "preprocess": [145, 195, 209, 225, 280, 433, 455, 526, 553], "preprocess_user_cfg": 280, "present": [52, 53, 55, 324, 325, 327, 488, 545, 552], "preserv": [169, 473, 477, 521, 541, 544, 549], "pretrain": [420, 462], "pretrained_model_name_or_path": [139, 141, 398, 448], "prettyt": 466, "preval": [477, 488, 549], "previou": [192, 470, 478, 488, 496, 497, 538, 544, 550, 552, 554], "previous": 523, "primari": [482, 521, 554], "primit": [140, 396], "print": [62, 135, 173, 234, 281, 334, 420, 425, 466, 483, 488, 489, 526, 537, 538, 544, 552, 554], "print_iter": 173, "print_op_list": 466, "print_tabl": 466, "printer": [161, 466], "prior": [1, 73, 345, 554], "prioriti": [152, 443, 446, 522], "privat": 490, "prob": 492, "probabl": [3, 195, 461, 477, 488, 549], "problem": [138, 209, 234, 397, 474, 537, 539], "proce": [181, 182, 187], "procedur": [544, 554], "proceed": [488, 552], "process": [101, 125, 135, 151, 153, 159, 161, 175, 180, 181, 182, 188, 190, 192, 195, 198, 199, 209, 210, 211, 221, 225, 235, 262, 263, 265, 266, 283, 412, 413, 448, 449, 465, 466, 471, 472, 473, 476, 477, 480, 481, 482, 484, 488, 489, 495, 496, 497, 521, 523, 525, 526, 528, 538, 541, 543, 544, 546, 547, 549, 552, 553], "process_and_check_config": 192, "process_config": 192, "process_weight_config": 192, "process_yaml_config": 192, "processor": [161, 439, 448, 474, 484, 488, 489, 494, 536, 539, 545, 546, 548], "processor_typ": [439, 484], "processortyp": [161, 439, 448], "product": [169, 474, 488, 533, 539, 545, 546, 551, 554], "profession": 490, "profil": [151, 195, 226], "profilerfactori": [254, 255, 259], "profilingpars": 250, "profilingresult": 251, "program": [476, 494, 
526, 535], "progress": [30, 183, 211, 544, 546], "project": [225, 469, 490, 491, 545, 551, 554, 556], "promis": [472, 525, 538, 544], "promot": [473, 477, 541], "prompt": [489, 526], "prone": 544, "propag": [1, 85, 355], "properti": [209, 280, 535], "proport": 234, "propos": [474, 477, 488, 495, 539, 549, 552], "protect": [477, 549], "protected_nod": [65, 337], "proto": [221, 225, 553], "protobuf": [90, 243, 390], "prototyp": 492, "prove": [477, 488, 521, 549, 552], "provid": [29, 30, 31, 87, 90, 95, 173, 198, 199, 225, 230, 235, 262, 360, 409, 413, 449, 460, 462, 470, 472, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 488, 489, 494, 495, 496, 497, 520, 523, 525, 526, 527, 531, 533, 534, 536, 537, 538, 539, 540, 544, 546, 549, 552, 554, 555], "proxi": 492, "prune": [162, 169, 170, 171, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 189, 190, 191, 192, 195, 466, 470, 494, 526, 527, 530, 531, 533, 543, 545], "prune_conf": 538, "prune_config": 192, "pruner": [538, 544], "pruner2": 544, "pruner_class": 187, "pruner_info": 188, "pruners_info": 192, "pruning_class": 188, "pruning_config": [195, 538, 544], "pruning_end": 544, "pruning_frequ": [180, 195, 538, 544], "pruning_func": 538, "pruning_op_typ": [195, 538, 544], "pruning_pattern": 544, "pruning_scop": [195, 538, 544], "pruning_start": 544, "pruning_typ": [195, 538, 544], "pruningcallback": 162, "pruningconfig": 449, "pruningcriterion": [169, 191], "pruningschedul": 190, "pseudo": [198, 199, 262, 449, 477, 549], "pt": [140, 396, 465, 477, 489, 494, 531, 534, 540, 545, 546, 548, 549], "pt2e": [405, 407, 409, 435, 437, 441, 555], "pt2e_dynamic_quant_entri": 437, "pt2e_export": 434, "pt2e_quant": 394, "pt2e_static_quant_entri": 437, "pt_fp32_model": 459, "pt_int8_model": 459, "ptq": [195, 262, 475, 476, 480, 481, 492, 526, 533, 538, 546, 552, 554], "public": [281, 490], "publish": [474, 490, 535, 536, 539, 552], "pull": [52, 53, 55, 324, 325, 327], "punct_nondigit_r": 227, "punctuat": [224, 227, 232], "pure": 544, "purif": 545, "purpos": [474, 475, 476, 477, 479, 530, 539, 546], "push": [473, 477, 488, 491, 541, 544, 549, 552], "put": [140, 211, 396], "pvc": 489, "py": [3, 133, 135, 151, 154, 180, 195, 228, 230, 231, 232, 281, 385, 443, 479, 483, 484, 489, 492, 495, 522, 526, 534, 538, 546, 550], "pycocotool": [230, 529], "pyhessian": 135, "pylanc": 522, "pyobject": 529, "pypi": 534, "pytest": 491, "python": [3, 60, 133, 138, 140, 161, 211, 226, 281, 332, 385, 396, 397, 443, 466, 468, 484, 489, 492, 494, 522, 526, 529, 534, 538, 544, 545, 553], "python3": 529, "pythonmultiheadattentionprun": 184, "pytorch": [163, 165, 166, 170, 173, 176, 179, 180, 183, 188, 194, 195, 208, 209, 211, 214, 218, 225, 234, 235, 244, 262, 272, 392, 394, 435, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 459, 460, 466, 473, 478, 489, 491, 492, 494, 495, 496, 497, 502, 521, 522, 523, 525, 527, 530, 531, 533, 534, 536, 538, 539, 540, 541, 544, 545, 549, 550, 552, 554], "pytorch_cpu": 497, "pytorch_fx": [211, 218, 538], "pytorch_ipex": [211, 218, 538], "pytorch_prun": 188, "pytorchalignimagechannel": 225, "pytorchbasemodel": 244, "pytorchbasepattern": [175, 189], "pytorchbaseprun": 180, "pytorchbasicprun": 181, "pytorchbertdataset": 209, "pytorchblockmaskprun": 182, "pytorchcifar10": 211, "pytorchcifar100": 211, "pytorchcriterion": 163, "pytorchcropresizetransform": 225, "pytorchcrossentropyloss": 163, "pytorchdataload": 206, "pytorchdataset": 211, "pytorchdynamo": 478, "pytorchfashionmnist": 211, "pytorchfilt": 218, 
"pytorchfxmodel": 244, "pytorchimagenetraw": 214, "pytorchintermediatelayersknowledgedistillationloss": 163, "pytorchintermediatelayersknowledgedistillationlosswrapp": 163, "pytorchknowledgedistillationloss": [163, 538], "pytorchknowledgedistillationlosswrapp": 163, "pytorchloss": 234, "pytorchmetr": 234, "pytorchmnist": 211, "pytorchmodel": 244, "pytorchmxnettransform": 225, "pytorchmxnetwrapdataset": 211, "pytorchmxnetwrapfunct": [211, 225], "pytorchnormalizetransform": 225, "pytorchoptim": 165, "pytorchpatternlockprun": 185, "pytorchpatternninm": 178, "pytorchpatternnxm": 179, "pytorchprogressiveprun": 186, "pytorchretrainfreeprun": 187, "pytorchselfknowledgedistillationloss": 163, "pytorchselfknowledgedistillationlosswrapp": 163, "pytorchsgd": 165, "pytorchtransform": 225, "pytorchtranspos": 225, "q": [30, 31, 92, 94, 116, 357, 359, 371, 471, 476, 478, 488, 549, 552], "q_conf": 525, "q_config": [28, 145, 292, 293, 294, 297, 298, 459, 495], "q_dataload": [198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 532, 554], "q_func": [198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 413, 495, 496, 538, 554], "q_hook": [198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274], "q_max": [488, 552], "q_min": [488, 552], "q_model": [135, 139, 195, 262, 301, 305, 417, 471, 475, 476, 480, 482, 489, 492, 523, 526, 528, 532, 537, 538, 540, 546, 547, 548, 549], "q_tensor": 433, "q_weight": [31, 423], "q_x": [488, 552], "qa": [231, 232, 494], "qactivationoper": 5, "qargmaxoper": 6, "qas_id": 225, "qat": [97, 144, 195, 243, 494, 525, 526, 531, 533, 538, 546], "qat_clone_funct": 101, "qat_op_name_dict": 195, "qattent": 7, "qattentionoper": 7, "qavgpool2d": 297, "qbinari": 8, "qbinaryoper": 8, "qconcat": 9, "qconcatoper": 9, "qconfig": [1, 145, 417, 465, 472, 477, 494, 549], "qconfig_file_path": 160, "qconv2d": 292, "qconvoper": 10, "qd": 476, "qdens": 293, "qdepthwiseconv2d": 294, "qdirect": 11, "qdirectoper": 11, "qdq": [29, 72, 97, 98, 127, 145, 195, 288, 344, 362, 433, 457, 458, 459, 480, 495, 528, 533, 546, 552], "qdq_enabl": [33, 306], "qdq_op_fp32_bia": 195, "qdq_op_fp32_bias_qdq": 195, "qdq_op_int32_bia": 195, "qdq_quantiz": 412, "qdq_tensor": 31, "qdq_weight_actor": 433, "qdq_weight_asym": 433, "qdq_weight_sym": 433, "qdqlayer": [398, 429], "qembedlayernorm": 12, "qembedlayernormalizationoper": 12, "qgather": 13, "qgatheroper": 13, "qgemm": 15, "qgemmoper": 15, "qglobalaveragepooloper": 14, "qintegerop": [533, 546], "qkv": 184, "qkv_modul": 184, "qkv_name": 184, "qlinear": [195, 211, 218, 234, 457, 495], "qlinear2qdq": 456, "qlinearaveragepool": 23, "qlinearconv": 10, "qlinearglobalaveragepool": 14, "qlinearmatmul": 18, "qlinearop": [457, 533, 546, 555], "qlora": [477, 488, 549], "qmatmuloper": 18, "qmax": 145, "qmaxpool": 19, "qmaxpool2d": 297, "qmaxpooloper": 19, "qmin": 145, "qmodel": [135, 479, 481], "qnli": [209, 537, 555], "qop_registri": 21, "qoper": [5, 21, 195, 528], "qpad": 22, "qpadoper": 22, "qpooloper": 23, "qqp": [209, 537, 555], "qresiz": 25, "qresizeoper": 25, "qscheme": 497, "qseparableconv2d": 298, "qsplit": 26, "qsplitoper": 26, "qsym_model": 1, "qt_config": [33, 306], "qtensor": [423, 424], "qtensor_to_tensor": 1, "qtensorconfig": 422, "qtensormetainfo": 426, "qtype": [29, 30, 495], "quadrat": [439, 477], "quala": 545, "qualiti": [227, 522], "quant": [31, 133, 145, 195, 278, 285, 287, 288, 289, 299, 303, 413, 416, 433, 438, 439, 477, 488, 489, 492, 522, 536, 538, 546], "quant_axi": [292, 293, 294, 297, 298], "quant_block_list": [418, 439], 
"quant_config": [288, 289, 301, 305, 391, 392, 403, 405, 412, 416, 418, 419, 420, 427, 430, 432, 442, 448, 471, 473, 475, 476, 477, 478, 479, 480, 481, 484, 531, 541], "quant_dequant_data": 29, "quant_dequant_w_v1": 413, "quant_dequant_x_v1": 413, "quant_format": [195, 459, 528], "quant_level": [195, 549, 552, 554], "quant_lm_head": [420, 439, 452, 477], "quant_max": 497, "quant_min": 497, "quant_mod": [32, 278, 289, 292, 293, 294, 297, 298, 496, 497], "quant_mode_from_pattern": 278, "quant_narrow_rang": [292, 293, 294, 297, 298], "quant_opt": 280, "quant_round_mod": [292, 293, 294, 297, 298], "quant_scal": [439, 477], "quant_statu": [292, 293, 294, 297, 298], "quant_t": [292, 293, 294, 297, 298], "quant_tensor": [31, 433], "quant_typ": 280, "quant_weight_w_scal": 433, "quant_zero": [439, 477], "quantformat": 30, "quantif": [475, 477, 480, 549, 552], "quantil": [433, 554], "quantit": 477, "quantiz": [1, 3, 4, 29, 30, 31, 33, 34, 73, 77, 79, 81, 84, 92, 97, 98, 99, 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 134, 135, 137, 141, 142, 145, 149, 151, 153, 157, 159, 161, 162, 195, 198, 199, 221, 226, 267, 269, 271, 280, 283, 284, 286, 288, 289, 290, 291, 292, 293, 294, 295, 297, 298, 392, 395, 398, 402, 403, 404, 405, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 422, 423, 424, 425, 426, 429, 430, 431, 432, 433, 434, 435, 436, 448, 449, 452, 453, 459, 461, 462, 465, 466, 470, 474, 480, 482, 485, 489, 495, 501, 502, 522, 523, 526, 527, 530, 531, 532, 533, 537, 540, 543, 545, 550, 553, 554], "quantizable_nod": 1, "quantizable_op": [145, 413, 417], "quantizaiton_config": 489, "quantization_aware_training_conf": 538, "quantization_cfg": 495, "quantization_config": [451, 489], "quantization_param": 28, "quantizationawaretrainingcallback": 162, "quantizationawaretrainingconfig": [195, 449, 492, 525, 528, 538, 546, 548, 554], "quantizationmethod": 452, "quantizationmod": 30, "quantizationspec": 409, "quantize_4bit": 433, "quantize_config": [99, 496], "quantize_data": 30, "quantize_data_per_channel": 30, "quantize_data_with_scale_zero": 30, "quantize_elemwise_op": 404, "quantize_graph": [96, 361], "quantize_graph_bas": [97, 362], "quantize_graph_bn": [97, 362], "quantize_graph_common": [96, 361], "quantize_graph_concatv2": [97, 362], "quantize_graph_conv": [97, 362], "quantize_graph_for_intel_cpu": [97, 362], "quantize_graph_matmul": [97, 362], "quantize_graph_pool": [97, 362], "quantize_help": 99, "quantize_lay": 99, "quantize_layer_add": 102, "quantize_layer_bas": 102, "quantize_layer_bn": 102, "quantize_model": [305, 479, 480, 481, 488], "quantize_model_with_single_config": 305, "quantize_mx_op": 404, "quantize_nod": 459, "quantize_nparrai": 30, "quantize_per_channel": [488, 552], "quantize_per_tensor_absmax": [488, 552], "quantize_rang": 30, "quantize_recip": 101, "quantize_sym_model": 1, "quantize_wrapp": 99, "quantizeconfig": [100, 101], "quantized_data": 30, "quantized_dict": 135, "quantized_model": [477, 484, 531], "quantized_nod": [92, 357], "quantized_value_typ": 30, "quantizedconcatv2": [109, 119, 364, 374], "quantizedconv": [76, 77, 347, 348], "quantizeddeconv": [76, 347], "quantizediniti": 30, "quantizedinput": [221, 553], "quantizedmatmul": [78, 79, 349, 350], "quantizedmatmulwithbiasanddequant": [79, 350], "quantizedrnnconvert": 84, "quantizedvalu": 30, "quantizedvaluetyp": 30, "quantizegraph": [117, 372], "quantizegraphbas": [117, 372], "quantizegraphforintel": [121, 376], "quantizegraphhelp": 
[124, 379], "quantizelay": 105, "quantizelayeradd": 104, "quantizelayerbas": 105, "quantizelayerbatchnorm": 106, "quantizelinear": 30, "quantizenodebas": [117, 372], "quantizer_cl": 448, "quantizev2": [83, 354], "quantizewrapp": [101, 107], "quantizewrapperbas": 107, "quantiztaion": [195, 417], "quantopt": 280, "quanttyp": [30, 280], "queri": [1, 32, 133, 145, 173, 184, 195, 288, 289, 417, 448, 488, 496, 497, 546], "query_framework_cap": 496, "query_fused_pattern": 495, "query_fw_cap": [495, 497], "query_layer_nam": 184, "query_quantizable_nod": 1, "querybackendcap": [495, 496], "question": [225, 231, 232, 488, 490, 491, 494, 495, 534, 544, 552, 553, 555], "question_text": 225, "quick": [470, 484, 530, 539, 544], "quickli": [482, 536, 554], "quint8": [30, 413], "quit": 544, "qweight_config_path": 549, "qwen": 489, "qwen2": [494, 545], "r": [30, 128, 150, 380, 475, 476, 477, 479, 481, 483, 488, 534, 546, 555], "r1": [133, 385], "r34": 492, "race": 490, "rais": [52, 53, 55, 90, 138, 140, 145, 170, 176, 183, 192, 230, 235, 281, 324, 325, 327, 396, 397, 413, 433, 435, 448, 494, 522, 544, 545], "ram": [140, 396, 477, 484], "ran": 162, "rand": [418, 439, 477, 488, 552], "randn": [195, 528], "random": [41, 133, 161, 195, 221, 225, 266, 270, 313, 391, 418, 466, 477, 538, 544, 553], "random_crop": [221, 553], "random_flip_left_right": [221, 553], "random_se": [195, 266, 538], "randomcrop": 553, "randomcroptftransform": 225, "randomcroptransform": 225, "randomhorizontalflip": [225, 538, 553], "randomli": [225, 266, 488, 552, 553, 554], "randomresizedcrop": [538, 553], "randomresizedcropmxnettransform": 225, "randomresizedcroppytorchtransform": 225, "randomresizedcroptftransform": 225, "randomresizedcroptransform": 225, "randomst": 266, "randomtunestrategi": 273, "randomverticalflip": [225, 553], "rang": [212, 213, 225, 281, 387, 433, 449, 462, 471, 473, 474, 477, 478, 481, 483, 488, 494, 495, 497, 521, 525, 526, 538, 541, 543, 544, 546, 549, 552, 553], "rank": 526, "rapid": [534, 536], "rate": [477, 488, 538, 544, 546], "rather": [477, 532, 549], "ratio": [31, 128, 150, 175, 180, 192, 195, 221, 225, 380, 536, 538, 544, 553, 555], "ratiospars": 555, "raw": [30, 151, 210, 214, 217, 271, 412, 415, 431, 437, 537, 554], "raw_arrai": 444, "raw_cmd": [151, 154], "raw_func": 446, "raw_imag": 211, "rawgptquant": 420, "rb": [140, 396], "rcnn": 528, "re": 549, "reach": [153, 175, 190, 195, 491, 523, 544, 554], "read": [90, 133, 140, 214, 225, 385, 396, 548, 551], "read_graph": [133, 385], "read_squad_exampl": 225, "read_tensorflow_node_attr": 90, "readabl": [154, 522], "readi": 544, "readlin": [140, 396], "readm": 526, "real": [30, 212, 387, 488, 496, 521, 526, 545, 546], "realdiv": [51, 54, 323, 326], "realiz": [526, 542, 548, 551], "rearrang": [31, 187, 477, 544, 549], "reason": [477, 488, 490, 496, 523, 549, 554], "rebuild": [133, 385], "recal": [232, 234], "receiv": 551, "recent": [474, 538, 539], "recip": [33, 173, 195, 306, 473, 482, 494, 531, 538, 541, 547, 549, 552, 554], "recipe_sampl": 173, "recipesearch": 173, "recogn": [227, 448, 554], "recognit": [474, 526, 528, 544], "recognitionimagenet": 555, "recommend": [234, 446, 472, 474, 484, 489, 494, 495, 530, 531, 544, 554], "recommendation_system": 195, "reconstruct": [133, 385, 545], "reconstruct_saved_model": [133, 385], "record": [135, 145, 163, 166, 209, 210, 211, 217, 433, 466, 482, 549, 551, 554], "record_max_info": [303, 413], "record_output": 166, "recov": [29, 411, 413, 433, 465, 466, 495, 525], "recover_config": 34, "recover_forward": 433, 
"recover_model_from_json": [411, 465], "rectangl": [477, 547], "recurs": [194, 466], "recursivescriptmodul": 415, "redpajama": [475, 552], "reduc": [16, 195, 439, 474, 475, 476, 477, 480, 481, 488, 489, 495, 521, 525, 538, 539, 544, 545, 546, 547, 548, 549, 552, 554], "reduce_rang": [2, 28, 29, 195, 497], "reducemax": 24, "reducemin": 24, "reduceminmaxoper": 24, "reduceoper": 24, "reduct": [544, 547], "redund": [76, 78, 347, 349, 448], "ref": [3, 133, 385], "refer": [29, 135, 149, 169, 177, 178, 179, 187, 188, 192, 195, 209, 227, 228, 234, 262, 281, 413, 420, 439, 472, 474, 475, 476, 478, 479, 480, 481, 489, 492, 494, 495, 496, 520, 523, 525, 526, 528, 532, 534, 535, 537, 538, 539, 542, 547, 550, 553, 554], "reference_corpu": 228, "refin": [443, 481, 532, 544], "reflect": [537, 553], "reg": [170, 181, 182, 187, 195], "reg_term": 189, "regard": [490, 548], "regardless": 490, "region": [475, 552], "regist": [3, 21, 138, 140, 146, 152, 163, 165, 169, 170, 175, 176, 180, 183, 188, 189, 190, 191, 211, 218, 225, 234, 245, 274, 280, 299, 391, 396, 397, 398, 413, 439, 443, 444, 448, 495, 522, 526, 537, 538, 542, 552, 554], "register_acceler": 443, "register_algo": [391, 448, 522], "register_autotun": 413, "register_config": [152, 522], "register_criterion": [169, 191], "register_customer_metr": 234, "register_pack_func": 444, "register_packag": [140, 396], "register_pattern": 175, "register_prun": [180, 188], "register_reg": 189, "register_schedul": 190, "register_supported_configs_for_fwk": 152, "register_weight_hook": 398, "registr": [146, 211, 218, 225], "registri": [152, 169, 175, 180, 188, 189, 190, 191, 443], "registry_criterion": 163, "regress": [209, 491], "regul": [181, 182, 187], "regular": [189, 227], "regulariz": 189, "reinstal": 529, "reject": 490, "rel": [195, 245, 538, 554, 555], "relat": [174, 189, 230, 280, 407, 410, 414, 424, 447, 455, 497, 531, 544, 549], "relationship": 195, "relative_loss": 153, "releas": [471, 476, 494, 531, 534, 556], "relev": [266, 496, 497, 522, 544], "reli": [478, 538, 552], "religion": 490, "reload": 160, "relu": [59, 63, 94, 331, 335, 359, 492, 530, 552], "relu6": [59, 331, 530], "remain": [431, 441], "remaind": 521, "remap": [55, 140, 327, 396], "remov": [5, 30, 36, 42, 45, 59, 65, 68, 69, 70, 81, 83, 125, 171, 184, 232, 283, 308, 314, 317, 331, 337, 340, 341, 342, 352, 354, 448, 490, 538, 544], "removableactivationoper": 5, "remove_init_from_model_input": 30, "remove_training_nod": [61, 333], "removetrainingnodesoptim": [65, 337], "renam": [66, 338, 478, 550], "rename_batch_norm": [61, 333], "renamebatchnormoptim": [66, 338], "repeat": 554, "repercuss": 490, "replac": [3, 141, 145, 232, 398, 406, 413, 427, 433, 471, 473, 476, 489, 522, 526, 538, 541, 550], "replace_forward": 433, "replace_pattern": 406, "replacement_fn": 427, "replic": 554, "replica": 554, "repo": [209, 469, 491, 527, 556], "repo_id": [141, 398, 448], "repo_typ": [141, 398, 448], "report": [490, 491, 494, 551], "repositori": 491, "repr": 452, "repres": [30, 152, 153, 156, 157, 175, 179, 180, 188, 189, 192, 211, 216, 230, 232, 234, 406, 426, 476, 477, 481, 488, 490, 497, 521, 528, 544, 546, 547, 554], "represent": [30, 138, 397, 473, 477, 488, 490, 492, 521, 525, 541, 544, 546], "reproduc": 418, "requant": [77, 79, 348, 350], "requantize_cfg": 145, "request": [476, 494, 546], "requir": [145, 195, 201, 243, 261, 390, 433, 472, 474, 476, 477, 481, 488, 489, 492, 495, 496, 520, 521, 523, 525, 526, 529, 530, 538, 539, 540, 544, 546, 549, 550, 551, 552, 554], "requirements_pt": [529, 534], 
"requirements_tf": 534, "rerang": [132, 384], "rerange_quant": [132, 384], "rerange_quantized_concat": [130, 382], "rerangequantizedconcat": [132, 384], "rerewrit": [36, 308], "rerun": 489, "resblock": 195, "rescal": [221, 225, 553], "rescalekeraspretraintransform": 225, "rescaletftransform": 225, "rescaletransform": 225, "research": [478, 494, 535, 552], "reserv": [198, 199], "reset": 537, "reset_none_to_default": 192, "reshap": [46, 52, 53, 58, 318, 324, 325, 330, 413, 488, 552], "reshape_in0_ndef": [52, 53, 324, 325], "reshape_in1_ndef": [52, 53, 324, 325], "reshape_in_channel_to_last": 413, "reshape_scale_as_input": 413, "reshape_scale_as_weight": 413, "reshuffl": 523, "resid": [140, 396], "resiz": [16, 209, 210, 221, 225, 553], "resize_method": 221, "resize_shap": 216, "resize_sid": [221, 553], "resizecropimagenet": [526, 553], "resizemxnettransform": 225, "resizeoper": 25, "resizepytorchtransform": 225, "resizetftransform": 225, "resizetransform": 225, "resizewithaspectratio": 221, "resizewithratio": [225, 553], "resnest50": 555, "resnet": [526, 555], "resnet101": 555, "resnet18": [472, 494, 539, 555], "resnet34": 492, "resnet50": [195, 479, 494, 526, 528, 539, 544, 546, 551, 555], "resnet50_fp32_pretrained_model": 526, "resnet50_v1": [526, 538], "resnetv2": 555, "resnext101_32x8d": 555, "resolut": 521, "resolv": [492, 493], "resort": 538, "resourc": 554, "respect": [488, 490, 530, 544, 552], "respons": [249, 250, 253, 479, 546], "rest": [145, 433], "restor": [477, 549], "restrict": [55, 140, 195, 327, 396, 544, 551], "restructuredtext": 281, "result": [30, 133, 135, 145, 221, 225, 227, 230, 231, 232, 234, 262, 269, 385, 406, 408, 417, 449, 452, 465, 466, 470, 471, 472, 475, 477, 480, 482, 483, 488, 490, 494, 523, 526, 533, 535, 537, 538, 542, 544, 545, 546, 549, 551, 552, 553, 554, 555], "resum": [195, 198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274], "resume_from": [161, 195, 466], "retain": 521, "retrac": 548, "retrain": [187, 476, 525], "retrain_fre": [169, 183, 544], "retrainfreecriterion": 169, "retrainfreeprun": [187, 188], "retri": 413, "retriev": [152, 159, 207, 387, 406, 413, 544], "return": [1, 3, 31, 39, 52, 53, 55, 59, 90, 101, 133, 134, 135, 140, 144, 145, 146, 151, 152, 154, 160, 161, 163, 165, 170, 173, 175, 176, 180, 183, 188, 189, 190, 192, 194, 198, 199, 211, 218, 221, 225, 227, 228, 230, 231, 232, 234, 235, 239, 243, 245, 257, 261, 262, 266, 274, 278, 280, 281, 299, 301, 303, 305, 311, 324, 325, 327, 331, 385, 390, 391, 396, 398, 404, 406, 408, 409, 411, 412, 413, 415, 417, 418, 420, 425, 427, 431, 433, 435, 437, 438, 439, 441, 442, 446, 448, 449, 452, 455, 459, 465, 466, 474, 478, 479, 482, 488, 492, 496, 497, 523, 526, 537, 540, 542, 544, 546, 549, 552, 554], "return_int": [433, 549], "return_tensor": 489, "reus": 478, "revers": [133, 154], "reversed_numa_info": 154, "revert": [272, 280, 554], "reverted_data_typ": 280, "review": [490, 491, 494, 545], "revis": [141, 398, 431, 441, 448], "rewrit": [35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 91, 92, 93, 94, 170, 307, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 406], "rgb": 221, "right": [140, 221, 230, 396, 488, 490, 491, 492, 552, 553], 
"rmax": [30, 481, 488, 546], "rmin": [30, 473, 481, 488, 541, 546], "rmse": [234, 537, 554], "rnn": 84, "rnn_convert": 80, "rnn_detail": 84, "roberta": [209, 555], "roc": 234, "role": [236, 390, 477, 488, 549, 552], "rollov": [200, 202, 204, 208, 387, 523], "romanian": 544, "root": [173, 209, 210, 211, 214, 234, 526, 537, 538], "root_linear": 174, "root_rank": 526, "roughli": [477, 488, 549], "rouhani": [473, 541], "round": [31, 128, 150, 380, 404, 439, 471, 472, 477, 478, 488, 494, 545, 546, 549, 552], "round_": [488, 552], "round_method": 439, "round_multipli": 171, "roundingmod": 404, "row": [466, 488, 552], "rowi": 31, "rte": [209, 537, 555], "rtn": [31, 145, 303, 392, 428, 437, 438, 439, 478, 484, 488, 489, 522, 547, 549], "rtn_algo_entri": 522, "rtn_arg": [477, 547, 549], "rtn_entri": 437, "rtn_g32asym": 549, "rtn_quantiz": 31, "rtnconfig": [437, 438, 439, 448, 452, 477, 478, 482, 489, 531], "rtnquantiz": 430, "rule": [481, 544], "run": [1, 125, 133, 140, 151, 180, 192, 195, 198, 199, 208, 224, 235, 262, 266, 283, 385, 396, 413, 449, 472, 474, 477, 478, 481, 483, 484, 488, 494, 495, 520, 526, 529, 530, 538, 545, 546, 549, 550, 551, 554], "run_arg": [438, 442, 478], "run_cmd": 554, "run_fn": [412, 413, 438, 442, 475, 476, 477, 478], "run_forward": 1, "run_generation_gpu_woq": 489, "run_inst": 151, "run_multi_instance_command": 154, "runtim": [471, 488, 491, 494, 495, 496, 498, 523, 527, 530, 533, 539, 541, 549, 554], "s1": 554, "s2": 554, "s3": 554, "s4": 554, "s5": 554, "s6": 554, "s7": 554, "s8": [292, 293, 294, 297, 298, 459], "sa_optim": 167, "sacrif": [473, 541, 545], "safe": [431, 491], "safe_seri": 431, "safetensor": 494, "salient": [31, 477, 544, 549], "same": [68, 126, 133, 153, 187, 195, 209, 225, 230, 286, 340, 385, 413, 466, 472, 473, 477, 482, 483, 488, 495, 497, 520, 523, 526, 530, 538, 541, 544, 546, 549, 552, 553, 554], "sampl": [31, 62, 145, 153, 195, 203, 207, 209, 210, 211, 221, 225, 234, 266, 334, 387, 413, 418, 477, 481, 482, 483, 488, 523, 527, 533, 546, 549, 552, 553, 554, 555], "sampler": [153, 200, 202, 204, 277, 387, 418, 439, 477, 482, 523, 526], "sampling_s": 538, "samsum": 555, "sapphir": [534, 536], "satisfi": [208, 534], "satur": 553, "save": [90, 133, 139, 140, 160, 170, 195, 211, 243, 262, 385, 390, 396, 398, 408, 411, 415, 431, 442, 449, 453, 458, 459, 462, 466, 472, 478, 479, 488, 489, 492, 494, 495, 532, 538, 539, 540, 543, 546, 547, 549, 552], "save_config_map": 160, "save_dir": 489, "save_for_huggingface_upstream": [462, 538], "save_load": [158, 407, 410, 414, 428], "save_path": [458, 459, 495, 540], "save_pretrain": 489, "save_protobuf": 90, "save_q_input": 413, "save_to_disk": 495, "saved_dir": [462, 489, 549], "saved_model": [133, 385, 481, 547], "saved_model_sess": [243, 390], "saved_model_tag": [243, 390], "saved_path": 398, "saved_result": [408, 415, 431, 441, 477, 478, 540, 549], "savedmodel": [235, 243, 262, 390], "scalabl": [474, 481, 488, 494, 534, 536, 539, 545, 546, 548], "scalar": [198, 199, 235, 262, 449, 537, 546, 554], "scale": [3, 30, 31, 85, 126, 128, 142, 145, 149, 150, 221, 225, 286, 292, 293, 294, 297, 298, 355, 380, 413, 422, 425, 426, 429, 433, 471, 472, 473, 475, 476, 477, 481, 488, 541, 546, 549, 551, 552, 553, 554], "scale_bit": 404, "scale_c": [128, 150, 380], "scale_dtyp": [418, 429, 439, 452, 462, 477, 549], "scale_format": 439, "scale_info": 466, "scale_method": [439, 472], "scale_nam": 30, "scale_param": 439, "scale_propag": [80, 351], "scale_quant_group_s": [439, 477], "scale_shar": [413, 439], "scale_valu": 30, 
"scalepropagationtransform": [85, 355], "scaler": 285, "scales_per_op": [126, 286, 303], "scan": [491, 494], "scenario": [477, 478, 488, 495, 520, 538, 544, 549], "scene": 544, "schedul": [170, 180, 181, 182, 187, 189, 195, 477, 538, 543], "schema": [89, 433], "scheme": [29, 30, 31, 142, 145, 413, 417, 429, 433, 495, 496, 497, 523, 530, 549, 554], "scienc": 534, "scipi": 266, "scope": [133, 385, 478, 491, 496, 538], "score": [169, 175, 180, 184, 191, 195, 227, 228, 230, 231, 232, 234, 269, 455, 492, 537, 542, 544, 551, 554], "script": [154, 173, 227, 228, 231, 232, 489, 491, 494, 526, 533, 545], "seamless": 489, "seamlessli": [473, 541, 544, 554], "search": [133, 173, 187, 195, 198, 199, 227, 266, 406, 420, 433, 466, 470, 477, 478, 480, 494, 522, 533, 537, 544, 545, 549, 552, 554], "search_algorithm": 195, "search_clip": 433, "search_pattern": 406, "search_spac": 195, "searcher": [173, 195], "searching_result": 173, "sec": [483, 555], "second": [49, 140, 174, 195, 209, 267, 321, 396, 466, 477, 484, 488, 489, 538, 549, 552, 554], "section": [281, 497, 522, 525, 544, 549], "secur": [138, 397, 494, 545, 556], "sed": 550, "see": [138, 225, 227, 397, 472, 477, 488, 490, 491, 492, 493, 522, 535, 538, 549, 552, 554], "seed": [133, 161, 195, 391, 418, 439, 466, 477, 538], "seek": [140, 396, 549], "seem": 522, "seen": [466, 495], "segment": [209, 228, 230, 491], "segment_id": [225, 538], "select": [189, 192, 195, 209, 433, 437, 443, 462, 472, 477, 488, 492, 521, 533, 534, 544, 546, 549, 554], "self": [48, 145, 173, 195, 245, 320, 433, 478, 495, 496, 523, 533, 537, 551, 554], "selfknowledg": 163, "selfknowledgedistillationloss": 163, "selfknowledgedistillationlossconfig": [195, 538], "selfmhasearch": 173, "semant": [495, 530], "send": [491, 496], "senior": 521, "sensit": [169, 533, 544], "sensitivitybalanc": 555, "sentenc": 522, "sep": [494, 545], "separ": [298, 418, 490, 522, 532, 534, 535, 543], "separable_conv2d": 295, "separableconv2d": 298, "seq_len": 452, "seq_length": 209, "seqlen": [418, 439, 477], "seqtyp": 90, "sequenc": [48, 90, 174, 177, 178, 195, 209, 224, 225, 228, 232, 320, 418, 477, 488, 495, 530, 544, 549, 553], "sequencediagram": [496, 497], "sequenti": [153, 207, 263, 265, 387, 391, 420, 482, 554], "sequentialsampl": [153, 207, 387], "seri": [221, 494, 534, 538, 545, 553], "serial": [138, 140, 396, 397, 431], "serv": [195, 243, 390, 488], "server": [161, 484], "servic": [494, 545], "sess": [133, 243, 385, 390], "session": [31, 133, 208, 243, 385, 390, 538], "sessionopt": 257, "set": [1, 30, 36, 81, 90, 100, 133, 140, 145, 151, 152, 153, 154, 161, 165, 192, 195, 198, 199, 200, 209, 211, 214, 225, 230, 234, 235, 243, 261, 262, 281, 288, 302, 308, 352, 385, 387, 389, 390, 396, 406, 409, 413, 431, 433, 438, 439, 441, 448, 449, 466, 472, 474, 475, 477, 478, 479, 480, 482, 483, 484, 488, 489, 490, 494, 496, 520, 523, 526, 528, 529, 530, 532, 533, 537, 538, 539, 544, 546, 549, 551, 552, 554], "set_all_env_var": 151, "set_cores_for_inst": 154, "set_eager_execut": 261, "set_env_var": 151, "set_epoch": 526, "set_loc": [471, 475, 476, 477, 478, 479], "set_modul": [145, 413, 433, 448], "set_nam": 90, "set_random_se": [161, 195, 466], "set_resume_from": [161, 195, 466], "set_tensor": 495, "set_tensorboard": [161, 195, 466], "set_workspac": [161, 195, 466], "settings_recommend": 522, "setup": [494, 530, 534, 544], "sever": [124, 190, 225, 379, 473, 474, 484, 488, 525, 533, 538, 539, 541, 544, 547, 552, 553, 554], "sex": [466, 490], "sexual": 490, "sf": 529, "sgd": [165, 195, 538], "shaji": 477, 
"shape": [30, 31, 90, 133, 175, 212, 213, 221, 225, 230, 385, 387, 420, 426, 435, 477, 481, 488, 530, 538, 549, 552, 553], "shape_overrid": 127, "shard": [141, 398, 431], "share": [30, 31, 67, 83, 94, 126, 286, 339, 354, 359, 433, 477, 488, 529, 530, 549, 552, 554], "share_qdq_y_pattern": [91, 356], "shared_criterion": [413, 439, 552], "shareqdqforitexypatternoptim": [94, 359], "shell": [154, 494], "shen": [535, 544], "shift": [128, 150, 221, 380, 553], "shop": 522, "short": 554, "shortcut": 209, "shorter": [209, 225, 553], "shot": [169, 187, 190, 477, 481, 533, 538, 544, 545, 549], "should": [40, 100, 101, 140, 162, 171, 173, 184, 195, 198, 199, 207, 209, 211, 225, 228, 234, 235, 262, 281, 301, 305, 312, 396, 413, 427, 431, 441, 449, 459, 466, 476, 478, 479, 480, 481, 483, 488, 489, 492, 495, 496, 497, 526, 537, 538, 542, 544, 546, 549, 553, 554], "show": [180, 466, 473, 488, 490, 526, 538, 541, 552, 554], "show_memory_info": 466, "show_nam": [131, 383], "show_op": [131, 383], "shown": [472, 473, 475, 488, 523, 537, 538, 541, 542, 543, 544, 547, 552, 554], "shrink": 544, "shuffl": [200, 202, 204, 387, 523, 538, 546], "shufflenet": 555, "side": [225, 494, 553], "sigmoid": 528, "sign": [280, 439, 477, 488, 491, 494, 497, 521, 545, 546, 549, 551], "signatur": [243, 390], "signed_flag": 280, "signifi": 90, "signific": [481, 488, 544, 545, 547, 552], "significantli": [474, 477, 538, 539, 544, 554], "signround": 477, "sigopt": [197, 545], "sigopt_api_token": [538, 551, 554], "sigopt_experiment_id": 551, "sigopt_experiment_nam": [538, 551, 554], "sigopt_project_id": [538, 551, 554], "sigopttunestrategi": 198, "silicon": [473, 541], "similar": [184, 488, 538, 552, 554], "similarli": [488, 552], "simpl": [156, 225, 472, 481, 488, 538, 544, 551, 552, 554], "simple_attr": 156, "simple_infer": [145, 417], "simple_progress_bar": 30, "simplest": 521, "simpli": [526, 540], "simplic": [488, 552], "simplifi": [544, 545], "simul": [168, 478, 497], "simultan": [184, 543], "sinc": [195, 227, 472, 477, 488, 528, 537, 543, 549], "since_vers": 89, "singl": [169, 190, 195, 203, 209, 225, 230, 234, 262, 305, 387, 406, 413, 477, 492, 494, 534, 544, 547, 552], "single_output": 234, "singleton": [95, 161, 360, 391, 466], "site": 550, "situat": [195, 478, 526, 544], "sixteen": [474, 539], "size": [83, 153, 177, 178, 202, 203, 208, 211, 217, 221, 225, 243, 245, 354, 387, 390, 418, 426, 431, 433, 466, 473, 476, 477, 488, 490, 496, 523, 526, 529, 538, 541, 542, 544, 546, 547, 549, 552, 553, 555], "skip": [140, 396, 489, 554], "skip_convers": 88, "skip_first": 277, "skip_special_token": 489, "skip_verified_config": 153, "skylak": 534, "slave": [280, 554], "slice": 90, "slim": [171, 172, 173, 230, 241, 243, 390, 391, 540, 544], "slim_sess": [243, 390], "slower": [477, 549], "small": [3, 52, 53, 54, 55, 269, 324, 325, 326, 327, 477, 488, 544, 549, 554, 555], "smaller": [55, 225, 327, 473, 538, 541, 544, 553], "smbo": 554, "smooth": [3, 125, 126, 133, 149, 195, 283, 284, 285, 286, 301, 303, 412, 413, 437, 439, 478, 494, 522, 531, 544, 554], "smooth_distribut": 3, "smooth_quant": [4, 139, 145, 148, 195, 394, 417, 522, 546, 552], "smooth_quant_arg": [195, 546, 552], "smooth_quant_calibr": 96, "smooth_quant_config": 301, "smooth_quant_en": [145, 417], "smooth_quant_entri": [301, 437, 522], "smooth_quant_scal": 96, "smoother": 282, "smoothquant": [29, 145, 149, 284, 410, 412, 413, 439, 470, 475, 477, 478, 480, 488, 536, 545, 549], "smoothquant_scale_info": [145, 413], "smoothquantcalibr": [125, 283], 
"smoothquantcalibrationllm": [125, 283], "smoothquantconfig": [284, 301, 303, 437, 439, 475, 480, 481], "smoothquantquant": 412, "smoothquantsampl": 277, "smoothquantscal": [126, 286], "smoothquantscalerllm": [126, 286], "smt": 227, "snapshot": 465, "snip": [169, 195, 533, 544, 555], "snip_momentum": [169, 195, 538, 544], "snip_momentum_progress": 195, "snip_progress": 195, "snipcriterion": 169, "snipmomentumcriterion": 169, "snippet": [522, 533], "so": [31, 90, 153, 225, 280, 442, 470, 472, 473, 477, 478, 481, 488, 489, 492, 494, 495, 529, 533, 539, 541, 546, 548, 549, 552, 553], "social": [490, 545], "socio": 490, "socket": [151, 154, 466, 555], "soft": 195, "softwar": [494, 535, 544, 545, 550], "solut": [128, 150, 380, 475, 477, 480, 488, 494, 495, 496, 529, 545, 549, 550, 552, 554], "solv": [488, 491, 546, 550], "some": [171, 173, 195, 209, 477, 478, 488, 494, 496, 497, 522, 528, 531, 534, 537, 538, 542, 544, 546, 548, 549, 552, 554], "someth": [198, 199, 211, 262, 449], "sometim": [90, 488, 492, 546], "somewhat": 496, "soon": 536, "sort": [195, 271, 477, 549, 554], "sota": 545, "sound": 534, "sourc": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 101, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149, 150, 151, 152, 153, 154, 156, 157, 159, 160, 161, 162, 163, 165, 166, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 226, 227, 228, 230, 231, 232, 234, 235, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 387, 389, 390, 391, 392, 396, 397, 398, 399, 401, 403, 404, 405, 406, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 421, 422, 423, 425, 426, 427, 429, 430, 431, 432, 433, 435, 437, 438, 439, 441, 442, 443, 444, 445, 446, 448, 449, 452, 453, 455, 457, 458, 459, 461, 462, 463, 464, 465, 466, 467, 468, 492, 494, 535, 538, 545], "space": [151, 181, 182, 187, 195, 198, 199, 232, 235, 262, 266, 271, 278, 478, 482, 488, 490, 491, 495, 496, 497, 522, 544, 546, 552], "spacetobatchnd": [43, 315], "spanbert": 555, "spars": [171, 185, 545], "sparse_dummy_v2": 213, "sparse_gpt": 544, "sparse_ratio": 213, "sparsecategoricalcrossentropyloss": 163, "sparsedummydataset": 213, "sparsegpt": [188, 544], "sparsegptprun": 188, "sparsiti": [175, 177, 178, 180, 181, 182, 185, 187, 190, 192, 195, 466, 470, 477, 494, 533, 538, 555], "sparsity_decay_typ": [195, 538, 544], "speak": 
[477, 488, 549], "special": [101, 140, 173, 245, 396, 478, 492, 523, 537, 538, 542, 544, 549], "specif": [29, 89, 101, 151, 152, 154, 165, 192, 195, 209, 210, 211, 212, 216, 218, 234, 262, 387, 389, 409, 413, 431, 448, 473, 474, 476, 477, 478, 481, 484, 490, 495, 496, 521, 522, 523, 530, 532, 536, 537, 538, 539, 540, 541, 542, 544, 549, 554], "specifi": [95, 140, 145, 151, 156, 192, 195, 198, 199, 225, 230, 234, 235, 262, 281, 360, 396, 404, 408, 413, 417, 418, 439, 441, 448, 449, 452, 465, 466, 478, 484, 488, 489, 496, 497, 526, 530, 537, 538, 542, 544, 549, 553, 554], "speed": [472, 488, 521, 538, 544, 545, 546, 554], "speedup": [488, 527, 533, 546], "spellcheck": 491, "spent": 554, "spevif": 433, "spiq": [29, 149, 413, 488, 552], "split": [16, 30, 67, 224, 225, 339, 418, 488, 492, 547, 549, 552, 553], "split_shared_bia": 30, "split_shared_input": [61, 333], "splitoper": 26, "splitsharedinputoptim": [67, 339], "spot": [475, 552], "spr": [32, 289], "sprase": 171, "sq": [284, 412, 481, 536, 552], "sq_config": 480, "sq_weight_tensor": 133, "sq_weights_nod": 133, "sqlalchemi": 529, "sqlinearwrapp": 413, "sqrt": [54, 326], "squad": [209, 225, 231, 232, 234, 537, 544, 553, 555], "squadexampl": 225, "squadf1": [234, 537], "squadv1": [225, 553], "squar": [234, 271, 472, 477, 537, 554], "squeez": [63, 335, 492], "squeezenet": 555, "squeezer": 174, "squential": 207, "src": 90, "ssd": [492, 528, 555], "ssd_mobilenet_v1": 538, "ssh": 526, "sst": [544, 555], "st": [209, 537], "stabil": [477, 549], "stabilityai": [475, 552], "stabl": [494, 544, 545], "stablelm": [475, 552], "stack": [420, 494], "stage": [152, 161, 263, 265, 272, 544, 554], "stai": [475, 477, 478, 489, 549, 552], "stand": [544, 554], "stand_norm": [212, 213, 387], "standard": [211, 225, 491, 496, 522, 528, 537, 553, 554], "star": 491, "start": [154, 195, 225, 230, 267, 470, 534, 545, 553, 554, 556], "start_epoch": 538, "start_posit": 225, "start_step": [180, 195, 538, 544], "stat": [154, 448], "state": [135, 174, 477, 484, 497, 544], "state_dict": [135, 398, 477, 526, 549], "statement": 522, "static": [28, 29, 32, 149, 173, 195, 262, 278, 287, 288, 289, 299, 301, 303, 413, 416, 417, 437, 439, 442, 459, 478, 494, 495, 496, 497, 528, 531, 533, 538, 547, 552, 554, 555], "static_config": 480, "static_graph": 173, "static_group": [439, 452, 477, 549], "static_qu": [282, 394, 479, 522], "static_quant_entri": [301, 437], "static_quant_export": 459, "staticmethod": [124, 379], "staticqu": [414, 416], "staticquantconfig": [288, 289, 299, 303, 391, 437, 439, 476, 479, 480, 481, 482], "staticquantquant": 416, "statist": [161, 466, 467, 472], "statu": [173, 478, 483, 490, 523, 541, 542, 544], "std": [221, 225, 538, 553], "std_valu": 221, "stderr": [391, 466], "step": [169, 179, 180, 181, 182, 186, 187, 190, 195, 449, 471, 475, 476, 477, 488, 489, 496, 497, 525, 526, 538, 543, 544, 548, 549, 551, 552, 554], "step1": 278, "step2": 278, "step3": 278, "step4": 278, "step5": 278, "step_siz": 552, "still": [478, 480, 488, 521, 538, 545, 546, 548], "stock": [32, 289, 552], "stop": [153, 195, 470, 482, 538, 554], "stopgradi": [65, 337], "storag": [140, 396, 477, 488, 537, 549, 552], "store": [125, 169, 170, 173, 180, 181, 182, 183, 184, 185, 186, 187, 189, 191, 195, 211, 283, 408, 413, 532], "str": [1, 29, 30, 31, 125, 135, 140, 141, 143, 144, 145, 146, 152, 154, 156, 160, 161, 163, 165, 170, 173, 194, 195, 209, 210, 211, 218, 221, 225, 227, 228, 230, 232, 234, 251, 256, 258, 260, 261, 277, 280, 281, 283, 299, 302, 303, 305, 391, 396, 398, 399, 404, 406, 
408, 409, 413, 415, 417, 418, 425, 427, 431, 433, 435, 437, 439, 441, 442, 443, 446, 448, 452, 455, 458, 459, 462, 466, 467, 477, 478, 481, 522, 523, 537, 553], "str2arrai": 466, "str_label": 537, "straightforward": [477, 481, 488, 544, 549, 552], "strategi": [1, 145, 187, 195, 196, 211, 226, 470, 482, 488, 494, 495, 496, 497, 501, 521, 522, 523, 526, 533, 538, 545, 546, 548, 549, 550], "strategy_kwarg": [195, 551, 554], "strategy_registri": [274, 554], "stream": [391, 466], "streamlin": [534, 545], "stretch": 472, "strftime": 195, "strict": 452, "stride": [90, 225, 292, 294, 297, 298, 553], "string": [30, 101, 133, 138, 140, 151, 161, 163, 165, 173, 175, 180, 188, 189, 190, 192, 195, 211, 227, 230, 239, 243, 385, 390, 391, 396, 397, 420, 448, 452, 455, 466, 472, 481, 537, 553], "strip": [52, 53, 55, 68, 69, 133, 324, 325, 327, 340, 341, 385], "strip_equivalent_nod": [61, 133, 333, 385], "strip_unused_lib": [133, 385], "strip_unused_nod": [61, 133, 333, 385], "stripequivalentnodesoptim": [68, 340], "stripunusednodesoptim": [69, 341], "stroke": 554, "structur": [174, 185, 186, 195, 279, 420, 466, 470, 478, 488, 533, 544, 545, 546, 555], "stsb": 555, "student": [162, 166, 195, 209, 525, 555], "student1_layer_name1": 195, "student1_layer_name2": 195, "student2_layer_name1": 195, "student2_layer_name2": 195, "student_layer_nam": 195, "student_layer_output_process": 195, "student_loss": 525, "student_model": [163, 195, 538], "student_output": 525, "style": [203, 216, 281, 522, 532], "style_fold": 216, "style_transfer_dataset": 215, "styletransferdataset": 216, "sub": [51, 145, 151, 173, 323, 417, 522, 554], "sub_class": [234, 262], "sub_modul": 522, "subclass": [3, 21, 146, 165, 175, 180, 188, 190, 204, 208, 211, 218, 225, 234, 245, 274, 413, 495], "subfold": 211, "subgraph": [40, 51, 90, 144, 312, 323, 547, 554], "subgraphstyl": 554, "subject": 535, "submit": [491, 545], "subpixel": 221, "subprocess": [154, 522, 523], "subsect": [488, 552], "subsequ": [184, 496], "subset": [211, 214], "subsidiari": 535, "substanti": 547, "substitut": [301, 305, 481], "success": 534, "successfulli": [492, 494, 526, 545], "successor": [76, 77, 78, 79, 347, 348, 349, 350], "sudo": 529, "suffici": 551, "suffix": [133, 385], "suggest": [478, 492, 522, 551], "suit": 534, "suitabl": 478, "sum": [195, 234], "summar": [131, 383, 522], "summari": [151, 154, 449, 555], "summary_benchmark": 151, "summary_latency_throughput": 154, "super": 232, "supplement": 481, "suppli": 1, "support": [1, 29, 30, 31, 82, 89, 149, 152, 163, 165, 170, 176, 183, 187, 195, 198, 199, 208, 209, 211, 218, 221, 225, 226, 234, 235, 239, 245, 262, 272, 278, 281, 353, 413, 437, 448, 449, 455, 461, 462, 466, 468, 470, 471, 476, 479, 482, 488, 494, 496, 522, 531, 536, 538, 545, 548, 551, 554], "supported_lay": [145, 420, 433], "supported_op_typ": [48, 320], "suppos": [488, 552], "suppress": [477, 488, 549, 552], "sure": [175, 180, 188, 190, 494, 520], "surg": [140, 396], "surrog": 554, "suyu": 535, "sweet": [475, 552], "switch": [70, 342], "switch_optim": [61, 333], "switchoptim": [70, 342], "sy": [391, 452, 466, 522, 555], "sym": [29, 30, 31, 409, 413, 433, 452, 496, 497, 530, 546, 549], "sym_full_rang": 549, "sym_model": 1, "symbol": [1, 30, 144, 145, 227, 235, 262, 492, 540], "symbol_r": 227, "symbolblock": 1, "symbolic_trac": 136, "symmetr": [98, 409, 413, 462, 481, 497, 546, 549, 553, 554], "symnet": 1, "synchron": [446, 554], "syntax": 530, "sys_nic": 494, "system": [135, 140, 151, 154, 396, 466, 473, 474, 484, 489, 520, 541, 544, 555], 
"systemat": [475, 480, 552], "szymon": 521, "t": [30, 52, 53, 55, 128, 140, 150, 175, 195, 225, 232, 324, 325, 327, 380, 396, 474, 475, 476, 481, 483, 492, 495, 522, 526, 538, 539, 544, 546, 549, 554], "t10k": 211, "t5": 544, "t5norm": 552, "tab": 232, "tabl": [173, 184, 466, 473, 494, 497, 528, 531, 541, 555], "table_entri": 466, "taco": [494, 545], "tag": [140, 243, 390, 396, 488, 552], "tail": 477, "tailor": [477, 484], "take": [3, 175, 195, 198, 199, 203, 225, 234, 235, 262, 266, 387, 449, 474, 477, 481, 484, 490, 492, 494, 523, 530, 538, 544, 546, 548, 549, 553, 554], "taken": [198, 199, 235, 262, 449, 546], "tamper": [140, 396], "tar": 211, "target": [1, 29, 30, 87, 145, 173, 190, 192, 195, 266, 288, 406, 420, 462, 472, 478, 526, 538, 544, 553, 554], "target_boxes_num": 537, "target_depth": [145, 417], "target_dtyp": [277, 406], "target_height": [225, 553], "target_lay": 173, "target_linear": 174, "target_op_lut": 173, "target_spars": [175, 195, 538, 544], "target_sparsity_ratio": 180, "target_width": [225, 553], "targetspac": 266, "task": [128, 209, 216, 234, 380, 472, 475, 477, 484, 488, 489, 525, 528, 533, 537, 538, 544, 547, 549, 552], "taskdataset": 555, "taskset": [484, 489], "tbb": 529, "td": 554, "teacher": [162, 195, 525, 538, 555], "teacher_layer_nam": 195, "teacher_layer_name1": 195, "teacher_layer_name2": 195, "teacher_layer_output_process": 195, "teacher_logit": 538, "teacher_model": [163, 195, 525, 538], "team": [159, 232, 490, 548], "technic": 494, "techniqu": [226, 468, 470, 476, 481, 482, 488, 494, 531, 533, 538, 543, 544, 545, 546, 555], "technologi": [211, 473, 536, 538, 541], "tell": [140, 234, 262, 396], "temp_path": [125, 283], "temperatur": [163, 195, 489, 538], "templat": [188, 538, 544], "template_config": 192, "temporari": [125, 145, 283, 413, 466, 490], "temporarili": 490, "ten": 484, "tencent": [494, 545], "tend": 544, "tensor": [1, 2, 3, 30, 31, 55, 90, 125, 133, 135, 140, 141, 145, 169, 170, 179, 180, 181, 182, 183, 184, 185, 186, 187, 189, 191, 195, 225, 243, 271, 283, 327, 385, 390, 391, 396, 398, 404, 406, 408, 411, 412, 413, 417, 422, 423, 425, 426, 433, 438, 442, 457, 459, 462, 465, 466, 471, 472, 473, 478, 481, 492, 497, 522, 523, 541, 544, 546, 550, 553, 554], "tensor2tensor": 228, "tensor_data": [74, 346, 391, 466, 467], "tensor_dict": 495, "tensor_dtyp": 90, "tensor_nam": [30, 133, 140, 141, 385, 396, 398], "tensor_to_nod": 1, "tensor_v": 133, "tensor_valu": 30, "tensorboard": [161, 195, 455, 466, 495, 538], "tensorcollector": 1, "tensordataset": 209, "tensorflow": [37, 39, 60, 61, 62, 72, 80, 86, 87, 88, 89, 90, 91, 95, 96, 97, 99, 102, 115, 125, 126, 127, 130, 133, 163, 165, 191, 195, 208, 209, 210, 211, 214, 216, 218, 225, 226, 228, 230, 234, 235, 243, 256, 257, 260, 261, 262, 272, 458, 459, 466, 480, 491, 495, 496, 502, 521, 522, 523, 525, 527, 530, 531, 533, 538, 539, 540, 541, 544, 545, 551, 552, 554], "tensorflow1": 208, "tensorflow_addon": 165, "tensorflow_dataload": 260, "tensorflow_itex": [211, 214, 216, 218, 523], "tensorflow_itexadaptor": [32, 289], "tensorflow_model": [237, 260], "tensorflowadam": 165, "tensorflowadamw": 165, "tensorflowadaptor": [32, 289], "tensorflowbasemodel": [243, 260, 390], "tensorflowbertdataload": 208, "tensorflowbertdataset": 209, "tensorflowcheckpointmodel": [243, 390], "tensorflowcifar10": 211, "tensorflowcifar100": 211, "tensorflowcocomap": 234, "tensorflowconfig": 289, "tensorflowconfigconvert": 289, "tensorflowcriterion": 163, "tensorflowcroptoboundingbox": 225, "tensorflowcrossentropyloss": 163, 
"tensorflowdataload": [208, 260], "tensorflowdataset": 211, "tensorflowfashionmnist": 211, "tensorflowfilt": 218, "tensorflowglobalconfig": 389, "tensorflowimagenetdataset": 214, "tensorflowimagenetraw": 214, "tensorflowimagerecord": 211, "tensorflowknowledgedistillationloss": 163, "tensorflowknowledgedistillationlossextern": 163, "tensorflowknowledgedistillationlosswrapp": 163, "tensorflowllmmodel": [243, 390], "tensorflowmap": 234, "tensorflowmetr": 234, "tensorflowmnist": 211, "tensorflowmodel": [243, 390], "tensorflowmodelzoobertdataload": 208, "tensorflowmodelzoobertdataset": [209, 210], "tensorflowoptim": 165, "tensorflowparserfactori": 252, "tensorflowprofilingpars": 253, "tensorflowqatmodel": 243, "tensorflowqdqtoonnxqdqconvert": 127, "tensorflowqueri": [32, 289, 495], "tensorflowrandomhorizontalflip": 225, "tensorflowrandomverticalflip": 225, "tensorflowresizecropimagenettransform": 221, "tensorflowresizewithratio": 225, "tensorflowsavedmodelmodel": [243, 390], "tensorflowsgd": 165, "tensorflowshiftrescal": 221, "tensorflowsparsecategoricalcrossentropi": 163, "tensorflowtfrecorddataset": 211, "tensorflowtopk": 234, "tensorflowtransform": 225, "tensorflowtranspos": 225, "tensorflowtransposelastchannel": 221, "tensorflowvocmap": 234, "tensorflowvocrecord": 211, "tensorflowwrapfunct": 225, "tensorproto": [30, 90], "tensorrt": [30, 521, 539, 546], "tensorrtexecutionprovid": [539, 546], "tensors_kl": 1, "tensors_minmax": 1, "teq": [428, 437, 439, 478, 489, 494, 545, 547, 549], "teq_arg": 477, "teq_quantize_entri": 437, "teqconfig": [437, 439, 452, 477, 489], "teqlinearfakequ": [142, 429], "tequant": 432, "term": [162, 181, 182, 187, 189, 478, 481, 491, 521, 535, 537, 542, 543, 554], "test": [151, 195, 225, 491, 494, 555], "test_func": 526, "text": [90, 188, 224, 225, 227, 232, 473, 475, 477, 488, 528, 535, 541, 544, 545, 549, 552, 553, 555], "tf": [90, 101, 126, 133, 173, 192, 208, 209, 210, 211, 225, 241, 243, 286, 303, 305, 361, 385, 387, 389, 390, 391, 479, 481, 494, 522, 526, 534, 540, 545, 548, 553], "tf1": [208, 540], "tf2": 540, "tf2onnx": [195, 456], "tf2onnx_convert": 96, "tf2onnx_util": 86, "tf2onnxconfig": [195, 528], "tf_criteria": 170, "tf_modul": 261, "tf_to_fp32_onnx": 458, "tf_to_int8_onnx": 458, "tfdatadataload": 208, "tfmodelzoocollecttransform": 225, "tfrecord": [209, 210, 211], "tfslimnetsfactori": [241, 391], "tfsquadv1modelzooposttransform": 225, "tfsquadv1posttransform": 225, "th": [74, 75, 346], "thalaiyasingam": 544, "than": [40, 133, 209, 225, 312, 391, 418, 443, 466, 473, 477, 488, 494, 520, 532, 541, 542, 543, 544, 546, 549, 551, 553, 554], "theblok": 494, "thei": [140, 281, 396, 474, 490, 522, 530, 538, 542], "them": [140, 173, 209, 396, 477, 478, 488, 492, 497, 523, 528, 530, 532, 534, 543, 544, 549, 554], "themselv": 546, "theoret": [477, 488, 546, 549], "therefor": [483, 488, 492, 538, 544, 546, 547, 550, 552], "thi": [29, 71, 95, 117, 124, 128, 132, 135, 138, 140, 145, 150, 152, 153, 154, 162, 166, 171, 173, 175, 177, 178, 179, 180, 184, 185, 186, 187, 188, 189, 190, 195, 198, 199, 208, 209, 210, 211, 212, 213, 214, 216, 225, 227, 230, 231, 232, 234, 235, 245, 262, 266, 271, 280, 281, 343, 360, 372, 379, 380, 384, 387, 396, 397, 413, 417, 442, 449, 465, 466, 470, 472, 474, 475, 476, 477, 479, 480, 481, 482, 488, 489, 490, 491, 492, 494, 495, 496, 497, 521, 522, 523, 525, 526, 528, 529, 530, 533, 534, 535, 536, 537, 538, 539, 540, 544, 546, 548, 549, 550, 551, 552, 553, 554], "think": [477, 549], "third": [230, 491, 535], "those": [59, 140, 271, 331, 
396, 466, 476, 481, 488, 525, 532, 538, 539, 543, 544, 546, 554], "though": [488, 552], "thread": [151, 195, 257, 261, 489, 523, 529], "threaten": 490, "three": [59, 192, 209, 263, 265, 331, 474, 478, 482, 488, 492, 495, 496, 521, 534, 539, 543, 546, 548, 551, 552], "threshold": [1, 147, 453, 461, 537], "through": [135, 198, 199, 234, 235, 262, 476, 477, 478, 488, 494, 520, 530, 532, 533, 537, 542, 543, 544, 546, 549], "throughput_pattern": 483, "throw": 523, "thu": [230, 488, 538, 544, 546], "thudm": 536, "tian": 535, "tiiuae": [475, 536, 552], "tile": 522, "till": [161, 466, 554], "tim": [477, 488, 549], "time": [133, 140, 161, 195, 225, 234, 245, 266, 385, 391, 396, 466, 476, 477, 478, 480, 482, 484, 488, 489, 494, 496, 523, 526, 529, 538, 542, 544, 545, 546, 549, 551, 552, 553, 554], "time_limit": 466, "timeout": [195, 538, 554], "tinybert": 555, "tip": 484, "titl": [466, 535, 554], "tloss": 526, "tmp_file_path": [391, 466], "to_numpi": 30, "toarrai": [225, 553], "todo": [156, 209, 212, 230, 278, 387, 420, 443], "togeth": [152, 195, 225, 534, 545, 553], "togethercomput": [475, 552], "token": [209, 222, 225, 227, 228, 418, 431, 452, 462, 475, 477, 488, 489, 537, 538, 549, 551, 552, 553, 554], "token_is_max_context": 225, "token_to_orig_map": 225, "token_type_id": [209, 538], "tokenzi": 224, "toler": [153, 481], "tolerable_loss": [153, 195, 482, 538, 554], "tolist": 552, "tondarrai": 553, "tondarraytransform": 225, "tool": [3, 133, 385, 458, 466, 488, 494, 495, 538, 544, 545, 546], "toolchain": [529, 545], "toolkit": [470, 492, 534, 545], "top": [225, 234, 491, 537, 553], "top1": [449, 537, 544, 546, 555], "topilimag": 553, "topk": [234, 262, 526, 537, 538, 546], "torch": [134, 135, 136, 137, 140, 141, 142, 143, 144, 145, 149, 153, 160, 170, 173, 174, 176, 184, 195, 226, 235, 262, 459, 462, 465, 471, 472, 473, 474, 475, 476, 477, 482, 484, 488, 489, 492, 497, 498, 526, 528, 529, 531, 538, 539, 540, 541, 546, 549, 552], "torch2onnx": [195, 456], "torch2onnxconfig": [195, 528], "torch_dtyp": [494, 497], "torch_load": 137, "torch_model": 237, "torch_to_fp32_onnx": 459, "torch_to_int8_onnx": 459, "torch_util": [439, 547, 552], "torchbaseconfig": 439, "torchdynamo": 478, "torchfunctyp": 406, "torchimport": 478, "torchscript": [441, 528, 547, 552], "torchsmoothqu": [412, 413, 552], "torchvis": [225, 472, 494, 527], "toronto": 211, "torr": 544, "total": [30, 180, 225, 234, 418, 477, 544, 549, 553, 555], "total_block_arg": 145, "total_block_kwarg": 145, "total_execution_tim": 251, "total_valu": [145, 433], "totensor": [538, 553], "tow": 526, "toward": 490, "tpe": 197, "tpetunestrategi": 199, "tqdm": [30, 538], "trace": [135, 144, 145, 173, 195, 269, 408, 412, 413, 417, 438, 442, 459, 476, 478, 492, 554], "trace_and_fuse_sub_graph": 144, "trace_gptq_target_block": 420, "traceabl": 492, "traced_model": [144, 413], "tracer": 492, "track": [161, 551], "trackabl": [133, 385], "trade": [477, 488, 549], "tradit": [481, 544], "train": [29, 42, 65, 149, 162, 181, 182, 185, 187, 195, 198, 199, 209, 211, 225, 226, 262, 314, 337, 413, 420, 439, 455, 473, 474, 475, 476, 477, 478, 480, 482, 494, 495, 496, 501, 525, 528, 531, 533, 539, 541, 543, 545, 547, 549, 551, 552, 554], "train_cfg": 455, "train_dataload": [538, 544, 546], "train_dataset": [526, 546], "train_fn": 477, "train_fun": 538, "train_func": [449, 526, 538, 546], "train_kwarg": 526, "train_load": 526, "train_loop": [449, 538, 543], "train_sampl": 526, "trainabl": [432, 477, 494, 545, 549], "trainableequivalenttransform": 432, "trainer": 538, 
"training_arg": 538, "training_func": 538, "training_func_for_nc": 525, "trane": 478, "transact": 552, "transfer": [162, 216, 488, 525, 538, 552], "transform": [30, 35, 128, 130, 131, 132, 143, 187, 209, 210, 211, 212, 213, 214, 216, 218, 220, 307, 380, 382, 383, 384, 387, 406, 412, 413, 417, 420, 432, 439, 446, 475, 476, 477, 480, 488, 492, 494, 521, 525, 526, 527, 531, 536, 538, 544, 545, 548, 549, 552, 554, 555], "transform_func": 225, "transform_graph": [96, 361], "transform_list": [225, 553], "transform_registri": 225, "transform_typ": 225, "transformer_block": 420, "transformerbasedmodelblockpatterndetector": [143, 417], "transformers_nam": 420, "transformers_pr": 420, "translat": [227, 228, 473, 541, 544, 554], "translation_corpu": 228, "transpos": [30, 58, 179, 221, 225, 330, 458, 549, 553], "travers": [482, 496, 497, 549, 554], "treat": [140, 396, 523], "tree": [188, 278], "tri": [152, 195, 522, 554], "trial": [153, 482, 554], "trigger": [151, 154, 180, 483], "troll": 490, "trt_env_setup": 30, "true": [1, 30, 31, 70, 98, 131, 133, 140, 151, 153, 195, 207, 209, 211, 212, 224, 225, 228, 234, 245, 262, 277, 281, 292, 293, 294, 298, 299, 303, 342, 383, 385, 387, 396, 398, 406, 412, 413, 418, 420, 423, 427, 429, 431, 432, 433, 439, 442, 452, 455, 459, 462, 465, 466, 471, 475, 476, 477, 478, 479, 481, 482, 483, 488, 489, 496, 523, 526, 530, 537, 538, 546, 547, 549, 552, 553, 554], "true_sequenti": [439, 452, 477, 549], "truncat": [209, 225, 418, 553], "trust": [140, 396], "trust_remote_cod": [431, 441, 489], "truth": [231, 232], "try": [207, 243, 390, 481, 488, 492, 494, 529, 544, 546, 552, 554], "try_cnt": [133, 385], "try_loading_kera": [243, 390], "tunabl": [152, 156], "tunable_typ": 156, "tune": [1, 151, 152, 153, 156, 159, 195, 198, 199, 209, 211, 235, 245, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 302, 413, 438, 439, 449, 466, 474, 475, 476, 477, 478, 482, 485, 489, 494, 495, 496, 521, 522, 523, 526, 531, 533, 535, 538, 539, 542, 544, 545, 548, 550, 551], "tune_cfg": [1, 134, 145, 412, 413, 417, 448, 495, 497, 554], "tune_config": [153, 302, 438, 474, 478, 479, 480, 481, 482, 496], "tuner": [198, 199, 235, 262, 449, 546], "tunestrategi": [274, 554], "tunestrategymeta": 274, "tuning_cfg_to_fw": 496, "tuning_config": 153, "tuning_criterion": [195, 538, 542, 551, 554], "tuning_history_path": 466, "tuning_items_prior": 277, "tuning_order_lst": 277, "tuning_param": 155, "tuning_sampl": 276, "tuning_spac": [276, 277, 279], "tuning_strategi": 195, "tuning_struct": [276, 277, 278], "tuningconfig": [153, 302, 438, 474, 478, 479, 480, 481, 482], "tuningcriterion": [195, 538, 542, 551, 554], "tuningitem": 278, "tuninglogg": [153, 159], "tuningmonitor": 153, "tuningord": 277, "tuningparam": 156, "tuningsampl": 277, "tuningspac": [277, 278], "tupl": [1, 90, 133, 145, 153, 195, 198, 199, 221, 225, 235, 262, 277, 302, 305, 399, 406, 408, 411, 412, 413, 417, 418, 425, 426, 435, 437, 438, 439, 442, 448, 449, 459, 465, 478, 481, 496, 522, 537, 538, 546, 553], "turbo": 555, "turn": [537, 547], "tutori": [470, 492, 545], "twitter": 545, "two": [133, 135, 140, 151, 174, 179, 186, 190, 211, 216, 230, 272, 396, 466, 472, 473, 474, 476, 477, 480, 482, 485, 488, 491, 492, 522, 523, 526, 528, 530, 538, 539, 541, 544, 546, 548, 549, 551, 552, 554], "txt": [214, 529, 534], "type": [1, 3, 29, 30, 31, 59, 81, 90, 101, 107, 117, 125, 133, 134, 135, 140, 145, 146, 152, 153, 154, 156, 160, 161, 163, 165, 173, 174, 175, 180, 188, 189, 190, 192, 194, 195, 207, 209, 211, 218, 
225, 227, 228, 230, 234, 243, 245, 266, 267, 274, 278, 280, 281, 283, 301, 305, 331, 352, 372, 385, 390, 391, 396, 398, 403, 404, 406, 408, 409, 411, 413, 417, 418, 420, 425, 426, 427, 431, 433, 435, 437, 438, 439, 442, 448, 459, 465, 466, 470, 472, 473, 474, 475, 476, 477, 478, 479, 481, 484, 495, 496, 526, 528, 530, 533, 538, 539, 541, 546, 547, 549, 551, 553, 554], "typealia": 522, "types_to_splic": [65, 337], "typic": [494, 525, 527, 544, 555], "u": [128, 150, 380], "ubuntu": [534, 555], "ubuntu22": 494, "ubyt": 211, "uint4": 497, "uint8": [30, 221, 230, 280, 409, 433, 439, 481, 488, 495, 530, 546, 553], "ultim": [488, 546], "ultra": [494, 534, 555], "unaccept": 490, "unari": 27, "unary_op": 16, "unarydirect8bitoper": 27, "unaryoper": 27, "unbalanc": 555, "uncas": [209, 225, 553, 555], "uncertain": 544, "undefin": [140, 192, 396], "under": [95, 195, 211, 360, 391, 448, 462, 465, 482, 491, 494, 535, 537, 538, 543, 544, 545, 547, 549, 554], "underli": [52, 53, 55, 140, 324, 325, 327, 396], "understand": [470, 488, 546], "understudi": 227, "unicod": 224, "unicodedecodeerror": [140, 396], "unicoderegex": 227, "unifi": [159, 392, 435, 495, 496, 497, 523, 532, 533, 537, 538, 540], "uniform": [477, 521, 549], "uniformli": [477, 549], "union": [152, 234, 435, 438, 439, 448, 449, 478, 481, 537], "uniqu": [230, 488, 546], "unique_id": 225, "unit": [175, 293, 491, 544], "unit_scal": 472, "unk": 224, "unk_token": 224, "unless": [140, 396, 530], "unlik": 481, "unnecessari": 538, "unpack": [421, 429], "unpack_weight": 429, "unpack_zp": 429, "unpackedweightonlylinearparam": 429, "unpickl": [138, 140, 396, 397], "unpicklingerror": [138, 397], "unpreced": 544, "unquant": 406, "unquantized_node_set": 406, "unsaf": [140, 396], "unseen": [481, 488, 546], "unset": 31, "unsign": [145, 280, 497, 549], "unstructur": [195, 533, 544, 555], "unsupport": [528, 538, 544], "until": [413, 472, 496, 554], "untrac": 492, "untrust": [140, 396], "unus": [69, 133, 341, 385, 448], "unwelcom": 490, "up": [1, 85, 184, 225, 355, 417, 472, 488, 494, 526, 527, 529, 530, 533, 544, 545, 546, 551, 553, 554], "up1": 530, "up2": 530, "updat": [59, 141, 145, 169, 175, 192, 234, 262, 331, 398, 413, 417, 470, 477, 529, 536, 537, 538, 544, 549, 552, 554], "update_config": 195, "update_modul": [141, 398], "update_param": 192, "update_sq_scal": [145, 413], "upgrad": 538, "upload": 533, "upon": 489, "upstream": [431, 441], "url": [211, 420, 491, 494, 534, 535, 544], "us": [1, 3, 21, 29, 30, 31, 33, 34, 55, 88, 90, 101, 126, 135, 140, 145, 151, 152, 154, 161, 163, 165, 169, 173, 175, 180, 184, 185, 188, 190, 191, 195, 198, 199, 207, 208, 209, 210, 211, 212, 213, 216, 224, 225, 227, 228, 230, 234, 245, 257, 261, 262, 266, 269, 271, 274, 278, 280, 281, 286, 288, 289, 301, 305, 306, 327, 387, 391, 396, 404, 405, 406, 408, 409, 412, 413, 416, 417, 418, 425, 426, 427, 431, 433, 438, 441, 442, 443, 448, 449, 452, 453, 454, 455, 458, 459, 460, 462, 466, 472, 473, 474, 475, 476, 477, 478, 479, 481, 482, 484, 488, 489, 490, 491, 492, 495, 496, 520, 521, 522, 525, 526, 528, 530, 531, 532, 535, 538, 539, 540, 541, 542, 544, 545, 546, 547, 548, 549, 550, 551, 553, 554, 555], "usabl": 494, "usag": [145, 152, 153, 156, 208, 230, 281, 391, 433, 443, 445, 448, 478, 479, 484, 488, 496, 526, 527, 542, 549, 553], "use_auto_clip": [439, 477], "use_auto_scal": [439, 477], "use_bf16": [33, 34, 306], "use_bia": [292, 293, 294, 298], "use_bp": 228, "use_double_qu": [439, 477], "use_full_length": 549, "use_full_rang": [439, 477], "use_layer_wis": [420, 439, 452, 
477], "use_max_length": [420, 549], "use_mse_search": [439, 452, 477], "use_optimum_format": [429, 462, 549], "use_sym": [439, 477, 482], "user": [140, 151, 165, 173, 185, 192, 195, 198, 199, 211, 214, 218, 225, 234, 235, 245, 262, 267, 280, 396, 406, 413, 417, 439, 448, 449, 466, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 488, 489, 492, 495, 496, 497, 501, 520, 522, 523, 525, 528, 530, 533, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 550, 552, 553, 554], "user_cfg": [413, 417], "user_config": 192, "user_eval_fns1": 153, "user_eval_fns2": 153, "user_eval_fns3": 153, "user_eval_fns4": 153, "user_metr": [234, 262], "user_model": [473, 531, 541], "user_obj_cfg": 245, "user_object": 245, "user_postprocess": 223, "user_processor_typ": 448, "userfloatmodel": [471, 476], "usr": 529, "usr_cfg": 245, "usual": [209, 481, 488, 496, 543, 544, 546, 547, 552], "utf": [140, 224, 396], "util": [0, 4, 90, 95, 96, 124, 127, 134, 135, 136, 137, 144, 152, 153, 155, 164, 170, 193, 195, 226, 228, 270, 290, 301, 302, 304, 305, 395, 402, 407, 410, 414, 418, 428, 436, 437, 439, 470, 472, 481, 484, 496, 497, 498, 521, 522, 526, 538, 544, 546, 554], "v": [544, 554], "v0": [475, 536, 552], "v1": [133, 221, 231, 232, 234, 243, 385, 390, 475, 486, 526, 528, 529, 537, 540, 544, 550, 551, 552, 555], "v14": 227, "v2": [135, 269, 475, 523, 526, 528, 529, 536, 550, 552, 554, 555], "v3": [486, 555], "v4": 555, "v5": 491, "v5s6": 555, "val": [133, 192, 211, 214, 426, 538], "val2017": 210, "val_dataload": [479, 538, 546], "val_dataset": [479, 538, 546], "val_load": 546, "val_map": 214, "valid": [40, 52, 53, 90, 159, 180, 183, 192, 195, 211, 230, 243, 292, 294, 297, 298, 312, 324, 325, 390, 409, 489, 494, 495, 527, 530, 533, 535, 538, 544, 546, 549, 550], "valid_keras_format": 391, "valid_mixed_precis": 530, "valid_reshape_input": [52, 53, 324, 325], "validate_and_inference_input_output": [243, 390], "validate_graph_nod": [243, 390], "valu": [3, 30, 31, 41, 52, 53, 55, 74, 75, 90, 128, 133, 140, 145, 150, 151, 169, 173, 174, 184, 191, 192, 195, 198, 199, 209, 212, 213, 221, 225, 230, 234, 235, 262, 266, 271, 278, 280, 281, 313, 324, 325, 327, 346, 380, 387, 396, 398, 413, 417, 425, 429, 449, 453, 455, 466, 471, 472, 473, 474, 475, 477, 480, 481, 488, 492, 496, 497, 521, 530, 537, 538, 539, 541, 542, 544, 546, 549, 552, 553, 554], "valuabl": 544, "value_layer_nam": 184, "valueerror": [52, 53, 55, 145, 230, 281, 324, 325, 327, 433, 529], "valueinfo": 30, "values_from_const": [52, 53, 55, 324, 325, 327], "vanhouck": 521, "vanilla": [495, 496, 533], "vari": [544, 552, 555], "variabl": [30, 138, 151, 189, 195, 211, 266, 280, 397, 443, 474, 478, 484, 489, 544, 554], "varianc": [128, 150, 380], "variant": 205, "varieti": [478, 482, 527, 554], "variou": [235, 392, 473, 476, 478, 481, 497, 533, 541, 544, 547], "vault": 494, "vcvtne2ps2bf16": [474, 539], "vcvtneps2bf16": [474, 539], "vdpbf16p": [474, 539], "vecchio": 534, "vector": 474, "ventura": 534, "verbos": [30, 266, 425, 459], "veri": [470, 477, 488, 538, 546, 549, 554], "verifi": [528, 536], "version": [1, 39, 89, 142, 145, 173, 195, 226, 311, 429, 446, 458, 459, 470, 475, 490, 492, 494, 495, 522, 530, 534, 535, 538, 545, 552, 554], "version1": [133, 391, 466], "version1_eq_version2": [133, 391, 466], "version1_gt_version2": [133, 391, 466], "version1_gte_version2": [133, 391, 466], "version1_lt_version2": [133, 391, 466], "version1_lte_version2": [133, 391, 466], "version2": [133, 391, 466], "vertic": [179, 225, 553], "vgg": 555, "vgg16": [528, 
555], "vgg19": 555, "via": [439, 477, 488, 490, 494, 526, 533, 538, 544, 545], "view": [491, 494, 527, 550], "viewpoint": 490, "vincent": 521, "violat": [138, 397], "virtual": [280, 545], "visibl": 483, "vision": [472, 488, 494, 528, 552], "visit": 555, "visual": [195, 551, 554], "vit": 555, "vmware": 545, "vnni": [488, 527, 533, 546], "voc": [211, 234], "vocab": 224, "vocab_fil": [224, 225, 553], "vocabulari": [209, 224, 225, 553], "vocmap": 537, "vscode": 522, "vtune": 535, "w": [31, 225, 420, 475, 488, 544, 552, 553], "w8a8": [405, 471, 476, 477, 488, 547, 549], "w8a8pt2equant": 405, "w_algo": 439, "w_dq": [488, 552], "w_dtype": [439, 473, 475, 476, 541], "w_fp32": [128, 150, 380], "w_granular": 439, "w_int8": [128, 150, 380], "w_q": [488, 552], "w_scale": [488, 552], "w_sym": 439, "wa": [140, 195, 396, 473, 475, 495, 538, 541, 545, 554], "wai": [173, 210, 211, 214, 243, 390, 477, 480, 488, 497, 523, 537, 538, 543, 544, 546, 549, 552, 554], "wanda": 170, "want": [52, 53, 55, 173, 195, 207, 209, 267, 324, 325, 327, 449, 492, 495, 523, 526, 538, 542, 546, 549, 551, 554], "waq": 552, "warm": 417, "warmup": [195, 520, 538], "warn": [413, 463, 522], "wasn": [140, 396], "wasserblat": 544, "we": [29, 30, 52, 53, 55, 59, 71, 128, 140, 149, 150, 151, 153, 173, 177, 184, 195, 208, 227, 230, 269, 324, 325, 327, 331, 343, 380, 396, 413, 470, 471, 475, 476, 477, 478, 480, 481, 482, 483, 484, 488, 489, 490, 492, 494, 495, 496, 497, 522, 523, 526, 528, 530, 531, 536, 537, 538, 542, 543, 544, 546, 547, 549, 550, 551, 552, 554], "web": 494, "websit": 534, "wechat": [494, 545], "wei": [477, 488, 549, 552], "weight": [29, 30, 31, 45, 46, 107, 125, 126, 128, 133, 135, 145, 150, 153, 169, 170, 174, 177, 178, 179, 180, 181, 182, 183, 185, 186, 187, 189, 191, 195, 234, 262, 269, 278, 280, 283, 286, 317, 318, 380, 398, 413, 422, 428, 429, 431, 432, 433, 439, 441, 459, 466, 467, 470, 471, 472, 475, 476, 478, 479, 480, 481, 482, 489, 495, 496, 497, 521, 525, 530, 531, 532, 536, 538, 542, 544, 545, 546, 547, 552, 554], "weight_algorithm": 303, "weight_bit": 496, "weight_clip": [303, 413], "weight_config": [31, 420, 432], "weight_correct": [148, 195, 546], "weight_decai": 538, "weight_dict": 135, "weight_dtyp": [299, 303, 452, 479], "weight_empir": [128, 380], "weight_granular": [299, 303, 479], "weight_max_lb": 413, "weight_max_valu": [292, 293, 294, 297, 298], "weight_min_valu": [292, 293, 294, 297, 298], "weight_name_map": [125, 283], "weight_onli": [4, 195, 394, 439, 465, 473, 522, 541, 547, 549], "weight_shap": 31, "weight_slim": [172, 184], "weight_sym": [299, 303, 479, 481, 482], "weight_tensor": 466, "weight_typ": 459, "weightcorrect": 150, "weightdetail": 466, "weightonli": 31, "weightonlylinear": [429, 477, 549], "weightonlyqu": 477, "weightonlyquantsampl": 277, "weightpruningconfig": [195, 538, 543, 544], "weights_detail": 460, "weights_onli": [140, 396], "weightsdetail": 467, "weightsstatist": 467, "welcom": [469, 490, 491, 494, 556], "well": [198, 199, 235, 262, 449, 473, 488, 494, 522, 541, 544, 546, 550, 552], "wenhua": [477, 488], "were": [140, 234, 396, 544], "wget": 529, "what": [145, 180, 207, 234, 262, 417, 449, 481, 490, 545], "when": [30, 39, 49, 55, 133, 138, 140, 153, 174, 180, 185, 192, 195, 209, 210, 218, 225, 228, 262, 301, 305, 311, 321, 327, 385, 396, 397, 406, 431, 444, 449, 472, 476, 477, 481, 482, 483, 484, 488, 490, 496, 522, 523, 538, 544, 546, 549, 552, 553, 554], "where": [30, 140, 195, 230, 234, 280, 396, 408, 466, 488, 497, 521, 552], "whether": [1, 30, 31, 90, 101, 133, 140, 
145, 175, 195, 207, 209, 221, 225, 228, 230, 234, 305, 385, 391, 396, 409, 412, 413, 420, 425, 426, 431, 433, 442, 446, 455, 462, 465, 466, 477, 483, 495, 537, 546, 549, 552, 553], "which": [1, 29, 41, 68, 95, 135, 140, 152, 169, 173, 174, 176, 180, 181, 182, 184, 187, 188, 190, 192, 195, 209, 210, 211, 221, 227, 232, 234, 239, 245, 262, 266, 313, 340, 360, 391, 396, 413, 420, 425, 426, 431, 448, 466, 470, 472, 474, 476, 477, 478, 479, 481, 482, 488, 490, 492, 495, 496, 497, 521, 523, 526, 529, 530, 533, 537, 538, 539, 540, 543, 544, 546, 547, 548, 549, 550, 552, 553, 554], "while": [174, 185, 192, 266, 474, 476, 477, 481, 488, 494, 495, 496, 527, 533, 539, 544, 546, 549, 552], "white_list": [152, 299, 303, 439], "white_module_list": 448, "white_nod": 2, "whitespac": [224, 232], "whitespace_token": 224, "whl": [494, 534], "who": [267, 490], "whole": [471, 488, 497, 546, 555], "whose": [149, 175, 243, 390, 413, 532, 552, 554], "why": [488, 552], "wide": [474, 477, 488, 494, 527, 539, 544, 546], "wideresnet40": 555, "width": [179, 221, 225, 488, 521, 526, 544, 546, 553], "wiki": 490, "wikitext": 555, "window": [151, 154, 483, 484, 520, 534], "winter": [488, 552], "wip": 536, "wise": [128, 137, 139, 141, 150, 195, 266, 267, 271, 380, 395, 398, 404, 470, 489, 496, 533, 544, 549, 552, 554], "wish": 535, "with_arg": 497, "within": [89, 152, 153, 181, 182, 187, 194, 230, 243, 257, 261, 390, 474, 477, 489, 490, 492, 497, 521, 539, 544, 545, 549, 552, 554], "without": [34, 75, 133, 185, 225, 280, 385, 446, 473, 488, 490, 526, 538, 541, 544, 545, 546, 554], "wnli": [209, 537], "won": [195, 546, 549], "woq": [431, 439, 441, 473, 489, 494, 536, 541], "woq_config": 489, "woq_model": 489, "woqmodelload": 431, "word": [227, 477, 488, 522, 537, 544, 549, 555], "wordpiec": [224, 225, 553], "wordpiecetoken": 224, "work": [195, 301, 305, 478, 481, 483, 484, 488, 493, 496, 522, 523, 548, 549, 550, 552], "worker": [538, 546], "workflow": [470, 473, 494, 496, 497, 528, 531, 534, 539, 541], "workload": [466, 474, 545], "workload_loc": 466, "workshop": 545, "workspac": [161, 195, 465, 466, 540], "workspace_path": 195, "worth": [522, 544], "would": [476, 488, 491, 538, 544, 546, 552], "wrap": [1, 90, 101, 163, 230, 305, 389, 492, 526], "wrapmxnetmetr": 234, "wraponnxrtmetr": 234, "wrapped_lay": 101, "wrapper": [60, 87, 88, 89, 90, 107, 125, 134, 142, 153, 163, 170, 208, 225, 230, 234, 239, 243, 283, 332, 389, 390, 401, 429, 433, 481, 548], "wrapperlay": 413, "wrappytorchmetr": 234, "write": [133, 218, 385, 466, 526, 532, 538], "write_graph": [133, 385], "written": 538, "wt_compare_dict": 135, "www": [211, 555], "x": [1, 40, 59, 174, 195, 208, 225, 266, 281, 312, 331, 413, 482, 488, 494, 521, 522, 526, 531, 534, 539, 540, 546, 547, 552, 553], "x1": [488, 552, 554], "x2": [488, 552, 554], "x86": [474, 539], "x86_64": 520, "x86_inductor_quant": 409, "x86inductorquant": [409, 471], "x_max": 266, "x_q": [488, 552], "x_scale": [488, 552], "x_tmp": [488, 552], "xdoctest": [140, 396], "xeon": [474, 483, 488, 494, 534, 535, 536, 539, 545, 546, 548, 555], "xgb": 195, "xgboost": 544, "xiao": [477, 488, 549, 552], "xiui": [477, 488, 549, 552], "xlm": [209, 555], "xlnet": [209, 555], "xpu": [195, 417, 443, 478, 481, 489, 546], "xpu_acceler": 443, "xx": [145, 433], "xx_func": 522, "xxx": [154, 211, 538], "xxy": 211, "xxz": 211, "y": [94, 195, 225, 266, 359, 488, 522, 529, 552, 553], "y_dq": [488, 552], "y_max": 266, "y_q": [488, 552], "yaml": [192, 195, 198, 199, 209, 211, 288, 465, 466, 495, 497, 532, 537, 538, 542, 553], 
"yaml_fil": [466, 526], "yaml_file_path": 526, "yao": 135, "year": 535, "yet": 546, "yield": [153, 198, 199, 207, 235, 262, 266, 281, 387, 449, 488, 523, 546, 554], "yolo": 555, "yolov3": 555, "yolov5": 544, "you": [29, 140, 195, 207, 209, 234, 262, 396, 413, 470, 472, 489, 491, 492, 496, 522, 523, 526, 529, 534, 535, 537, 539, 544, 546, 549, 550, 551, 552], "your": [207, 209, 470, 476, 489, 491, 494, 522, 523, 526, 535, 538, 544, 545, 551, 554], "your_node1_nam": 526, "your_node2_nam": 526, "your_script": 550, "yourmodel": 477, "yourself": 544, "youtub": 545, "yum": [529, 534], "yvinec": [488, 552], "z": [30, 483, 488, 521, 522, 552], "za": 483, "zafrir": 544, "zero": [3, 30, 31, 180, 192, 225, 292, 293, 294, 298, 413, 422, 425, 426, 433, 466, 473, 476, 477, 488, 541, 544, 546, 549, 552, 553, 554], "zero_grad": [526, 538, 544], "zero_point": [30, 31, 452], "zero_point_nam": 30, "zeropoint": [481, 488, 546], "zfnet": 555, "zhen": 135, "zhewei": 135, "zip": [413, 492], "zo_valu": 30, "zone": 545, "zoo": [208, 225, 494, 527, 555], "zp": [31, 429, 433, 488, 552], "\u03b1": 521, "\u03b2": 521, "\u2776": 496, "\u2777": 496, "\u2778": 496, "\u2779": 496, "\u277a": 496, "\u277b": 496, "\u277c": 496, "\u817e\u8baf\u4e91taco": 545, "\u96c6\u6210\u82f1\u7279\u5c14": 545}, "titles": ["neural_compressor.adaptor.mxnet_utils", "neural_compressor.adaptor.mxnet_utils.util", "neural_compressor.adaptor.ox_utils.calibration", "neural_compressor.adaptor.ox_utils.calibrator", "neural_compressor.adaptor.ox_utils", "neural_compressor.adaptor.ox_utils.operators.activation", "neural_compressor.adaptor.ox_utils.operators.argmax", "neural_compressor.adaptor.ox_utils.operators.attention", "neural_compressor.adaptor.ox_utils.operators.binary_op", "neural_compressor.adaptor.ox_utils.operators.concat", "neural_compressor.adaptor.ox_utils.operators.conv", "neural_compressor.adaptor.ox_utils.operators.direct_q8", "neural_compressor.adaptor.ox_utils.operators.embed_layernorm", "neural_compressor.adaptor.ox_utils.operators.gather", "neural_compressor.adaptor.ox_utils.operators.gavgpool", "neural_compressor.adaptor.ox_utils.operators.gemm", "neural_compressor.adaptor.ox_utils.operators", "neural_compressor.adaptor.ox_utils.operators.lstm", "neural_compressor.adaptor.ox_utils.operators.matmul", "neural_compressor.adaptor.ox_utils.operators.maxpool", "neural_compressor.adaptor.ox_utils.operators.norm", "neural_compressor.adaptor.ox_utils.operators.ops", "neural_compressor.adaptor.ox_utils.operators.pad", "neural_compressor.adaptor.ox_utils.operators.pooling", "neural_compressor.adaptor.ox_utils.operators.reduce", "neural_compressor.adaptor.ox_utils.operators.resize", "neural_compressor.adaptor.ox_utils.operators.split", "neural_compressor.adaptor.ox_utils.operators.unary_op", "neural_compressor.adaptor.ox_utils.quantizer", "neural_compressor.adaptor.ox_utils.smooth_quant", "neural_compressor.adaptor.ox_utils.util", "neural_compressor.adaptor.ox_utils.weight_only", "neural_compressor.adaptor.tensorflow", "neural_compressor.adaptor.tf_utils.graph_converter", "neural_compressor.adaptor.tf_utils.graph_converter_without_calib", "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert", "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout", 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base", "neural_compressor.adaptor.tf_utils.graph_rewriter", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert", 
"neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern", "neural_compressor.adaptor.tf_utils.graph_util", "neural_compressor.adaptor.tf_utils", "neural_compressor.adaptor.tf_utils.quantize_graph", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize", "neural_compressor.adaptor.tf_utils.quantize_graph.qat", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling", "neural_compressor.adaptor.tf_utils.quantize_graph_common", "neural_compressor.adaptor.tf_utils.smooth_quant_calibration", "neural_compressor.adaptor.tf_utils.smooth_quant_scaler", "neural_compressor.adaptor.tf_utils.tf2onnx_converter", "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction", "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base", "neural_compressor.adaptor.tf_utils.transform_graph", "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging", "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat", "neural_compressor.adaptor.tf_utils.util", "neural_compressor.adaptor.torch_utils.bf16_convert", "neural_compressor.adaptor.torch_utils.hawq_metric", 
"neural_compressor.adaptor.torch_utils", "neural_compressor.adaptor.torch_utils.layer_wise_quant", "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle", "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize", "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load", "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils", "neural_compressor.adaptor.torch_utils.model_wrapper", "neural_compressor.adaptor.torch_utils.pattern_detector", "neural_compressor.adaptor.torch_utils.symbolic_trace", "neural_compressor.adaptor.torch_utils.util", "neural_compressor.algorithm.algorithm", "neural_compressor.algorithm.fast_bias_correction", "neural_compressor.algorithm", "neural_compressor.algorithm.smooth_quant", "neural_compressor.algorithm.weight_correction", "neural_compressor.benchmark", "neural_compressor.common.base_config", "neural_compressor.common.base_tuning", "neural_compressor.common.benchmark", "neural_compressor.common", "neural_compressor.common.tuning_param", "neural_compressor.common.utils.constants", "neural_compressor.common.utils", "neural_compressor.common.utils.logger", "neural_compressor.common.utils.save_load", "neural_compressor.common.utils.utility", "neural_compressor.compression.callbacks", "neural_compressor.compression.distillation.criterions", "neural_compressor.compression.distillation", "neural_compressor.compression.distillation.optimizers", "neural_compressor.compression.distillation.utility", "neural_compressor.compression.hpo", "neural_compressor.compression.hpo.sa_optimizer", "neural_compressor.compression.pruner.criteria", "neural_compressor.compression.pruner", "neural_compressor.compression.pruner.model_slim.auto_slim", "neural_compressor.compression.pruner.model_slim", "neural_compressor.compression.pruner.model_slim.pattern_analyzer", "neural_compressor.compression.pruner.model_slim.weight_slim", "neural_compressor.compression.pruner.patterns.base", "neural_compressor.compression.pruner.patterns", "neural_compressor.compression.pruner.patterns.mha", "neural_compressor.compression.pruner.patterns.ninm", "neural_compressor.compression.pruner.patterns.nxm", "neural_compressor.compression.pruner.pruners.base", "neural_compressor.compression.pruner.pruners.basic", "neural_compressor.compression.pruner.pruners.block_mask", "neural_compressor.compression.pruner.pruners", "neural_compressor.compression.pruner.pruners.mha", "neural_compressor.compression.pruner.pruners.pattern_lock", "neural_compressor.compression.pruner.pruners.progressive", "neural_compressor.compression.pruner.pruners.retrain_free", "neural_compressor.compression.pruner.pruning", "neural_compressor.compression.pruner.regs", "neural_compressor.compression.pruner.schedulers", "neural_compressor.compression.pruner.tf_criteria", "neural_compressor.compression.pruner.utils", "neural_compressor.compression.pruner.wanda", "neural_compressor.compression.pruner.wanda.utils", "neural_compressor.config", "neural_compressor.contrib", "neural_compressor.contrib.strategy", "neural_compressor.contrib.strategy.sigopt", "neural_compressor.contrib.strategy.tpe", "neural_compressor.data.dataloaders.base_dataloader", "neural_compressor.data.dataloaders.dataloader", "neural_compressor.data.dataloaders.default_dataloader", "neural_compressor.data.dataloaders.fetcher", "neural_compressor.data.dataloaders.mxnet_dataloader", "neural_compressor.data.dataloaders.onnxrt_dataloader", "neural_compressor.data.dataloaders.pytorch_dataloader", 
"neural_compressor.data.dataloaders.sampler", "neural_compressor.data.dataloaders.tensorflow_dataloader", "neural_compressor.data.datasets.bert_dataset", "neural_compressor.data.datasets.coco_dataset", "neural_compressor.data.datasets.dataset", "neural_compressor.data.datasets.dummy_dataset", "neural_compressor.data.datasets.dummy_dataset_v2", "neural_compressor.data.datasets.imagenet_dataset", "neural_compressor.data.datasets", "neural_compressor.data.datasets.style_transfer_dataset", "neural_compressor.data.filters.coco_filter", "neural_compressor.data.filters.filter", "neural_compressor.data.filters", "neural_compressor.data", "neural_compressor.data.transforms.imagenet_transform", "neural_compressor.data.transforms", "neural_compressor.data.transforms.postprocess", "neural_compressor.data.transforms.tokenization", "neural_compressor.data.transforms.transform", "neural_compressor", "neural_compressor.metric.bleu", "neural_compressor.metric.bleu_util", "neural_compressor.metric.coco_label_map", "neural_compressor.metric.coco_tools", "neural_compressor.metric.evaluate_squad", "neural_compressor.metric.f1", "neural_compressor.metric", "neural_compressor.metric.metric", "neural_compressor.mix_precision", "neural_compressor.model.base_model", "neural_compressor.model", "neural_compressor.model.keras_model", "neural_compressor.model.model", "neural_compressor.model.mxnet_model", "neural_compressor.model.nets_factory", "neural_compressor.model.onnx_model", "neural_compressor.model.tensorflow_model", "neural_compressor.model.torch_model", "neural_compressor.objective", "neural_compressor.profiling", "neural_compressor.profiling.parser.factory", "neural_compressor.profiling.parser.onnx_parser.factory", "neural_compressor.profiling.parser.onnx_parser.parser", "neural_compressor.profiling.parser.parser", "neural_compressor.profiling.parser.result", "neural_compressor.profiling.parser.tensorflow_parser.factory", "neural_compressor.profiling.parser.tensorflow_parser.parser", "neural_compressor.profiling.profiler.factory", "neural_compressor.profiling.profiler.onnxrt_profiler.factory", "neural_compressor.profiling.profiler.onnxrt_profiler.profiler", "neural_compressor.profiling.profiler.onnxrt_profiler.utils", "neural_compressor.profiling.profiler.profiler", "neural_compressor.profiling.profiler.tensorflow_profiler.factory", "neural_compressor.profiling.profiler.tensorflow_profiler.profiler", "neural_compressor.profiling.profiler.tensorflow_profiler.utils", "neural_compressor.quantization", "neural_compressor.strategy.auto", "neural_compressor.strategy.auto_mixed_precision", "neural_compressor.strategy.basic", "neural_compressor.strategy.bayesian", "neural_compressor.strategy.conservative", "neural_compressor.strategy.exhaustive", "neural_compressor.strategy.hawq_v2", "neural_compressor.strategy", "neural_compressor.strategy.mse", "neural_compressor.strategy.mse_v2", "neural_compressor.strategy.random", "neural_compressor.strategy.strategy", "neural_compressor.strategy.utils.constant", "neural_compressor.strategy.utils", "neural_compressor.strategy.utils.tuning_sampler", "neural_compressor.strategy.utils.tuning_space", "neural_compressor.strategy.utils.tuning_structs", "neural_compressor.strategy.utils.utility", "neural_compressor.template.api_doc_example", "neural_compressor.tensorflow.algorithms", "neural_compressor.tensorflow.algorithms.smoother.calibration", "neural_compressor.tensorflow.algorithms.smoother.core", "neural_compressor.tensorflow.algorithms.smoother", 
"neural_compressor.tensorflow.algorithms.smoother.scaler", "neural_compressor.tensorflow.algorithms.static_quant", "neural_compressor.tensorflow.algorithms.static_quant.keras", "neural_compressor.tensorflow.algorithms.static_quant.tensorflow", "neural_compressor.tensorflow", "neural_compressor.tensorflow.keras", "neural_compressor.tensorflow.keras.layers.conv2d", "neural_compressor.tensorflow.keras.layers.dense", "neural_compressor.tensorflow.keras.layers.depthwise_conv2d", "neural_compressor.tensorflow.keras.layers", "neural_compressor.tensorflow.keras.layers.layer_initializer", "neural_compressor.tensorflow.keras.layers.pool2d", "neural_compressor.tensorflow.keras.layers.separable_conv2d", "neural_compressor.tensorflow.keras.quantization.config", "neural_compressor.tensorflow.keras.quantization", "neural_compressor.tensorflow.quantization.algorithm_entry", "neural_compressor.tensorflow.quantization.autotune", "neural_compressor.tensorflow.quantization.config", "neural_compressor.tensorflow.quantization", "neural_compressor.tensorflow.quantization.quantize", "neural_compressor.tensorflow.quantization.utils.graph_converter", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass", 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base", "neural_compressor.tensorflow.quantization.utils.graph_rewriter", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern", "neural_compressor.tensorflow.quantization.utils.graph_util", "neural_compressor.tensorflow.quantization.utils", "neural_compressor.tensorflow.quantization.utils.quantize_graph", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn", 
"neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling", "neural_compressor.tensorflow.quantization.utils.quantize_graph_common", "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction", "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base", "neural_compressor.tensorflow.quantization.utils.transform_graph", "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging", "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat", "neural_compressor.tensorflow.quantization.utils.utility", "neural_compressor.tensorflow.utils.constants", "neural_compressor.tensorflow.utils.data", "neural_compressor.tensorflow.utils", "neural_compressor.tensorflow.utils.model", "neural_compressor.tensorflow.utils.model_wrappers", "neural_compressor.tensorflow.utils.utility", "neural_compressor.torch.algorithms.base_algorithm", "neural_compressor.torch.algorithms.fp8_quant.utils.logger", "neural_compressor.torch.algorithms", "neural_compressor.torch.algorithms.layer_wise", "neural_compressor.torch.algorithms.layer_wise.load", "neural_compressor.torch.algorithms.layer_wise.modified_pickle", "neural_compressor.torch.algorithms.layer_wise.utils", "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert", "neural_compressor.torch.algorithms.mixed_precision", "neural_compressor.torch.algorithms.mixed_precision.module_wrappers", "neural_compressor.torch.algorithms.mx_quant", "neural_compressor.torch.algorithms.mx_quant.mx", "neural_compressor.torch.algorithms.mx_quant.utils", "neural_compressor.torch.algorithms.pt2e_quant.core", "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter", "neural_compressor.torch.algorithms.pt2e_quant", "neural_compressor.torch.algorithms.pt2e_quant.save_load", "neural_compressor.torch.algorithms.pt2e_quant.utility", "neural_compressor.torch.algorithms.smooth_quant", "neural_compressor.torch.algorithms.smooth_quant.save_load", "neural_compressor.torch.algorithms.smooth_quant.smooth_quant", "neural_compressor.torch.algorithms.smooth_quant.utility", "neural_compressor.torch.algorithms.static_quant", "neural_compressor.torch.algorithms.static_quant.save_load", "neural_compressor.torch.algorithms.static_quant.static_quant", "neural_compressor.torch.algorithms.static_quant.utility", "neural_compressor.torch.algorithms.weight_only.autoround", "neural_compressor.torch.algorithms.weight_only.awq", "neural_compressor.torch.algorithms.weight_only.gptq", "neural_compressor.torch.algorithms.weight_only.hqq.bitpack", "neural_compressor.torch.algorithms.weight_only.hqq.config", "neural_compressor.torch.algorithms.weight_only.hqq.core", "neural_compressor.torch.algorithms.weight_only.hqq", "neural_compressor.torch.algorithms.weight_only.hqq.optimizer", "neural_compressor.torch.algorithms.weight_only.hqq.qtensor", "neural_compressor.torch.algorithms.weight_only.hqq.quantizer", "neural_compressor.torch.algorithms.weight_only", "neural_compressor.torch.algorithms.weight_only.modules", "neural_compressor.torch.algorithms.weight_only.rtn", 
"neural_compressor.torch.algorithms.weight_only.save_load", "neural_compressor.torch.algorithms.weight_only.teq", "neural_compressor.torch.algorithms.weight_only.utility", "neural_compressor.torch.export", "neural_compressor.torch.export.pt2e_export", "neural_compressor.torch", "neural_compressor.torch.quantization.algorithm_entry", "neural_compressor.torch.quantization.autotune", "neural_compressor.torch.quantization.config", "neural_compressor.torch.quantization", "neural_compressor.torch.quantization.load_entry", "neural_compressor.torch.quantization.quantize", "neural_compressor.torch.utils.auto_accelerator", "neural_compressor.torch.utils.bit_packer", "neural_compressor.torch.utils.constants", "neural_compressor.torch.utils.environ", "neural_compressor.torch.utils", "neural_compressor.torch.utils.utility", "neural_compressor.training", "neural_compressor.transformers.quantization.utils", "neural_compressor.transformers.utils", "neural_compressor.transformers.utils.quantization_config", "neural_compressor.utils.collect_layer_histogram", "neural_compressor.utils.constant", "neural_compressor.utils.create_obj_from_config", "neural_compressor.utils.export", "neural_compressor.utils.export.qlinear2qdq", "neural_compressor.utils.export.tf2onnx", "neural_compressor.utils.export.torch2onnx", "neural_compressor.utils", "neural_compressor.utils.kl_divergence", "neural_compressor.utils.load_huggingface", "neural_compressor.utils.logger", "neural_compressor.utils.options", "neural_compressor.utils.pytorch", "neural_compressor.utils.utility", "neural_compressor.utils.weights_details", "neural_compressor.version", "Intel\u00ae Neural Compressor Documentation", "2.X API User Guide", "Dynamic Quantization", "FP8 Quantization", "Microscaling Quantization", "PyTorch Mixed Precision", "PyTorch Smooth Quantization", "PyTorch Static Quantization", "PyTorch Weight Only Quantization", "Torch", "TensorFlow Quantization", "Smooth Quant", "TensorFlow", "AutoTune", "Benchmark", "Quantization on Client", "Design", "Version mapping between Intel Neural Compressor to Gaudi Software Stack", "<no title>", "Quantization", "Transformers-like API", "Contributor Covenant Code of Conduct", "Contribution Guidelines", "FX", "Security Policy", "Intel\u00ae Neural Compressor", "Adaptor", "How to Add An Adaptor", "How to Support New Data Type, Like Int4, with a Few Line Changes", "Adaptor", "ONNX Runtime", "Torch Utils", "2.0 API", "3.0 API", "API Document Example", "APIs", "Benchmark", "Compression", "Config", "Mix Precision", "Model", "Objective", "Quantization", "Strategy", "Tensorflow Quantization AutoTune", "Tensorflow Quantization Base API", "Tensorflow Quantization Config", "Pytorch Quantization AutoTune", "Pytorch Quantization Base API", "Pytorch Quantization Config", "Training", "Benchmarking", "Calibration Algorithms in Quantization", "INC Coding Conventions", "DataLoader", "Design", "Distillation for Quantization", "Distributed Training and Inference (Evaluation)", "Examples", "Export", "Frequently Asked Questions", "Framework YAML Configuration Files", "Getting Started", "Incompatible changes between v1.2 and v1.1", "Infrastructure of Intel\u00ae Neural Compressor", "Installation", "Legal Information", "LLMs Quantization Recipes", "Metrics", "Code Migration from Intel Neural Compressor 1.X to Intel Neural Compressor 2.X", "Mixed Precision", "Model", "Microscaling Quantization", "Objective", "Optimization Orchestration", "Pruning", "Full Publications/Events (85)", "Quantization", "Layer Wise Quantization (LWQ)", 
"Turn OFF Auto Mixed Precision during Quantization", "Weight Only Quantization (WOQ)", "Release", "SigOpt Strategy", "Smooth Quant", "Transform", "Tuning Strategies", "Validated Models", "Intel\u00ae Neural Compressor Documentation"], "titleterms": {"": [494, 534], "0": [501, 502, 555], "1": [526, 529, 532, 538, 555], "15": [545, 555], "16": 555, "18": 555, "2": [470, 501, 526, 529, 532, 538, 555], "2018": 545, "2020": 545, "2021": 545, "2022": 545, "2023": 545, "2024": 545, "25": 545, "3": [502, 529, 555], "35": 545, "4": [529, 545], "5": 529, "6": 545, "64": 534, "85": 545, "For": 489, "One": 543, "With": 479, "abil": 497, "accept": 491, "accord": [496, 497], "accuraci": [474, 479, 488, 536, 539, 546, 554], "activ": 5, "ad": 495, "adaptor": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 495, 496, 498], "add": 496, "addit": 494, "advanc": 470, "ai": 534, "algorithm": [146, 147, 148, 149, 150, 282, 283, 284, 285, 286, 287, 288, 289, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 489, 521, 549, 552, 554], "algorithm_entri": [301, 437], "alpha": [475, 480, 552], "an": 496, "annot": 522, "api": [470, 473, 474, 478, 481, 489, 495, 496, 501, 502, 503, 504, 514, 517, 520, 523, 525, 526, 532, 537, 539, 541, 542, 543, 544], "api_doc_exampl": 281, "appendix": 528, "approach": [481, 546], "architectur": [485, 524, 533, 534], "argmax": 6, "argument": 477, "ask": 529, "asymmetr": 488, "attent": 7, "attribut": [211, 281, 490], "auto": [263, 480, 548, 552, 554], "auto_acceler": 443, "auto_mixed_precis": 264, "auto_slim": 171, "autoround": [418, 477], "autotun": [302, 438, 474, 478, 482, 513, 516], "awar": [479, 488, 492, 538, 544, 546], "awq": [419, 477], "backend": [476, 481, 495, 546], "background": [495, 522], "base": [175, 180, 470, 514, 517, 534], "base_algorithm": 392, "base_config": 152, "base_dataload": 200, "base_model": 236, "base_tun": 153, "basic": [181, 265, 554], "bayesian": [266, 554], "benchmark": [151, 154, 483, 505, 520, 538], "benefit": 551, "bert_dataset": 209, "between": [486, 532], "bf16": [35, 36, 37, 307, 308, 309, 474, 539], "bf16_convert": [35, 134, 307], "bias_correct": [128, 380], "binari": 534, "binary_op": 8, "bit_pack": 444, "bitpack": 421, "bleu": 227, "bleu_util": 228, "block": 552, "block_mask": 182, "build": [523, 529, 537], "built": [532, 534, 537], "calcul": 496, "calibr": [2, 3, 283, 496, 521], "callback": 162, "capabl": [495, 549], "case": 483, "chang": [497, 532, 550], "channel": [488, 552], "check": 491, "checklist": 491, "citat": 535, "class": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 
85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 134, 135, 139, 142, 143, 146, 147, 149, 150, 152, 153, 156, 157, 159, 161, 162, 163, 165, 169, 173, 174, 175, 177, 178, 179, 180, 181, 182, 184, 185, 186, 187, 188, 189, 190, 191, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 227, 230, 234, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 258, 259, 260, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 303, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 387, 389, 390, 391, 392, 398, 399, 401, 403, 404, 405, 406, 412, 413, 416, 417, 418, 419, 420, 421, 422, 423, 426, 427, 429, 430, 431, 432, 433, 439, 443, 445, 449, 452, 453, 461, 462, 463, 464, 466, 467, 495], "client": [477, 484], "coco_dataset": 210, "coco_filt": 217, "coco_label_map": 229, "coco_tool": 230, "code": [490, 491, 522, 538, 549], "collect_layer_histogram": 453, "comment": 522, "common": [152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 477, 478, 492, 529], "commun": 494, "comparison": 551, "compat": 534, "compress": [162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 506, 549], "compressor": [469, 486, 492, 494, 523, 533, 534, 537, 538, 551, 556], "comput": 534, "concat": 9, "conduct": [490, 491], "config": [195, 299, 303, 422, 439, 507, 515, 518, 542], "configur": [497, 526, 530, 551], "conserv": [267, 554], "constant": [157, 275, 386, 445, 454], "content": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 101, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149, 150, 151, 152, 153, 154, 156, 157, 159, 160, 161, 162, 163, 165, 166, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 227, 228, 230, 231, 232, 234, 235, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 
364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 387, 389, 390, 391, 392, 396, 397, 398, 399, 401, 403, 404, 405, 406, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 421, 422, 423, 425, 426, 427, 429, 430, 431, 432, 433, 435, 437, 438, 439, 441, 442, 443, 444, 445, 446, 448, 449, 452, 453, 455, 457, 458, 459, 461, 462, 463, 464, 465, 466, 467, 494], "contrib": [196, 197, 198, 199], "contribut": 491, "contributor": [490, 491], "conv": 10, "conv2d": 292, "convent": 522, "convert_add_to_biasadd": [38, 310], "convert_layout": [39, 311], "convert_leakyrelu": [40, 312], "convert_nan_to_random": [41, 313], "convert_placeholder_to_const": [42, 314], "core": [284, 405, 423], "coven": [490, 491], "cpu": [489, 494, 534, 555], "creat": 491, "create_obj_from_config": 455, "criteria": [169, 491, 544, 554], "criterion": 163, "custom": [523, 537, 554], "data": [200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 387, 496, 497, 527], "dataload": [200, 201, 202, 203, 204, 205, 206, 207, 208, 523], "dataset": [209, 210, 211, 212, 213, 214, 215, 216, 532], "decai": 544, "default_dataload": 202, "defin": [497, 526], "demo": [472, 483], "dens": 293, "depend": 489, "deploy": 544, "depthwise_conv2d": 294, "dequantize_cast_optim": [36, 308], "design": [485, 496, 524, 554], "detail": 492, "determin": [480, 552], "devic": [481, 489, 546], "differ": 551, "dilated_contract": [43, 315], "direct_q8": 11, "distil": [163, 164, 165, 166, 525, 538, 555], "distribut": [526, 554], "docker": [494, 534], "document": [469, 494, 503, 556], "driven": [474, 539], "dummy_biasadd": [44, 316], "dummy_dataset": 212, "dummy_dataset_v2": 213, "dump": 483, "dure": [539, 548], "dynam": [471, 488, 492, 546], "each": 552, "effici": 477, "embed_layernorm": 12, "enforc": 490, "engin": [534, 552], "enhanc": 552, "entir": 552, "environ": [446, 534], "evalu": 526, "evaluate_squad": 231, "event": [494, 545], "exampl": [471, 472, 473, 474, 476, 477, 479, 480, 488, 489, 492, 495, 503, 520, 523, 525, 526, 527, 528, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 549, 552, 555], "except": [138, 397], "execut": 526, "exhaust": [268, 554], "exit": 554, "expanddims_optim": [45, 317], "export": [434, 435, 456, 457, 458, 459, 528, 549], "f1": 232, "face": 532, "factori": [247, 248, 252, 254, 255, 259], "fake_quant": 98, "fast_bias_correct": 147, "featur": [526, 530, 531, 533, 546], "fetch_weight_from_reshap": [46, 318], "fetcher": 203, "few": 497, "file": 530, "filter": [217, 218, 219], "fix": [475, 480, 552], "flow": [495, 546], "fold_batch_norm": [47, 319], "fold_const": [48, 320], "folder": 522, "fp16": [474, 539], "fp32": [496, 528], "fp8": [472, 494], "fp8_quant": 393, "framework": [475, 494, 496, 523, 528, 530, 534, 540, 541, 547, 549, 552], "free": 544, "freeze_fake_qu": [73, 345], "freeze_valu": [74, 346], "freeze_value_without_calib": 75, "frequent": 529, "from": [494, 496, 534, 538], "full": 545, "function": [1, 3, 21, 29, 30, 31, 52, 53, 55, 89, 90, 101, 103, 133, 134, 135, 140, 141, 144, 145, 146, 151, 152, 153, 154, 160, 161, 163, 165, 166, 169, 170, 171, 173, 175, 176, 180, 183, 188, 189, 190, 191, 192, 194, 201, 202, 209, 211, 218, 224, 225, 227, 228, 230, 231, 232, 234, 235, 239, 243, 245, 257, 261, 262, 266, 274, 278, 280, 281, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 324, 325, 327, 385, 387, 390, 391, 396, 398, 404, 406, 408, 409, 411, 412, 413, 415, 417, 418, 420, 425, 427, 431, 433, 435, 
437, 438, 439, 441, 442, 443, 444, 446, 448, 449, 455, 457, 458, 459, 462, 463, 465, 466, 526], "fundament": [488, 546, 552], "fuse_biasadd_add": [49, 321], "fuse_column_wise_mul": [50, 322], "fuse_conv_redundant_dequant": [76, 347], "fuse_conv_requant": [77, 348], "fuse_conv_with_math": [51, 323], "fuse_decomposed_bn": [52, 324], "fuse_decomposed_in": [53, 325], "fuse_gelu": [54, 326], "fuse_layer_norm": [55, 327], "fuse_matmul_redundant_dequant": [78, 349], "fuse_matmul_requant": [79, 350], "fuse_pad_with_conv": [56, 328], "fuse_pad_with_fp32_conv": [57, 329], "fuse_qdq_bn": [108, 363], "fuse_qdq_concatv2": [109, 364], "fuse_qdq_conv": [110, 365], "fuse_qdq_deconv": [111, 366], "fuse_qdq_in": [112, 367], "fuse_qdq_matmul": [113, 368], "fuse_qdq_pool": [114, 369], "fuse_reshape_transpos": [58, 330], "fx": 492, "gather": 13, "gaudi": 486, "gavgpool": 14, "gemm": 15, "gener": [38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 483, 496], "get": [471, 472, 473, 474, 476, 477, 479, 484, 492, 494, 495, 520, 523, 525, 526, 530, 531, 537, 539, 541, 542, 543, 544, 546], "gptq": [420, 477], "gpu": [489, 494, 534], "graph": 496, "graph_bas": [71, 343], "graph_convert": [33, 306], "graph_converter_without_calib": 34, "graph_cse_optim": [59, 331], "graph_rewrit": [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359], "graph_transform_bas": [129, 381], "graph_util": [95, 360], "grappler_pass": [60, 332], "guid": 470, "guidelin": 491, "half_precision_convert": 399, "half_precision_rewrit": 406, "hardwar": [474, 534, 539, 555], "hawq_metr": 135, "hawq_v2": [269, 554], "heterogen": 534, "horovodrun": 526, "how": [482, 496, 497], "hpo": [167, 168], "hpu": [494, 534], "hqq": [421, 422, 423, 424, 425, 426, 427, 477], "hyperparamet": 544, "imag": [494, 534], "imagenet_dataset": 214, "imagenet_transform": 221, "implement": [495, 496], "import": 522, "inc": 522, "incompat": [532, 550], "infer": 526, "inform": 535, "infrastructur": 533, "insert_log": [131, 383], "insert_print_nod": [62, 334], "insert_qdq_pattern": [92, 357], "instal": [494, 534], "int4": 497, "int8": [73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 528, 555], "intel": [469, 486, 489, 494, 523, 533, 534, 537, 538, 556], "intel_extension_for_pytorch": [494, 534], "interfac": 522, "intern": 522, "introduct": [471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 483, 484, 488, 489, 492, 495, 496, 497, 520, 521, 523, 525, 526, 528, 530, 533, 537, 539, 540, 541, 542, 543, 544, 546, 547, 549, 551, 552, 553, 554], "invok": 497, "ipex": [476, 488, 546, 555], "issu": [529, 550], "iter": 496, "json": 522, "kera": [288, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 555], "keras_model": 238, "kernel": 497, "kit": 534, "kl_diverg": 461, "knowledg": 555, "known": 550, "languag": [494, 536, 544], "larg": [494, 
536, 544], "latenc": 483, "layer": [292, 293, 294, 295, 296, 297, 298, 477, 547, 552], "layer_initi": 296, "layer_wis": [395, 396, 397, 398], "layer_wise_qu": [137, 138, 139, 140, 141], "legal": 535, "licens": 535, "like": [478, 489, 497], "limit": [488, 552], "line": 497, "list": [496, 527, 553], "llm": [494, 536], "load": [396, 477, 478, 494], "load_entri": 441, "load_huggingfac": 462, "logger": [159, 393, 463, 522], "lstm": 17, "lwq": 547, "map": 486, "matmul": [18, 488, 552], "matrix": [474, 475, 477, 478, 481, 483, 492, 495, 520, 521, 523, 525, 526, 528, 530, 531, 533, 537, 539, 540, 541, 542, 543, 544, 546, 547, 549, 552], "maxpool": 19, "merge_duplicated_qdq": [93, 358], "meta_op_optim": [81, 352], "metric": [227, 228, 229, 230, 231, 232, 233, 234, 532, 537], "mha": [177, 184], "microsc": [473, 541], "migrat": 538, "mix": [474, 481, 508, 538, 539, 548], "mix_precis": 235, "mixed_precis": [399, 400, 401], "mme": 534, "mode": [492, 555], "model": [236, 237, 238, 239, 240, 241, 242, 243, 244, 389, 475, 476, 482, 494, 496, 509, 528, 534, 536, 538, 540, 541, 544, 547, 549, 552, 555], "model_slim": [171, 172, 173, 174], "model_wrapp": [142, 390], "modified_pickl": [138, 397], "modul": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 101, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149, 150, 151, 152, 153, 154, 156, 157, 159, 160, 161, 162, 163, 165, 166, 169, 171, 173, 174, 175, 177, 178, 179, 180, 181, 182, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 227, 228, 230, 231, 232, 234, 235, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 387, 389, 390, 391, 392, 396, 397, 398, 399, 401, 403, 404, 405, 406, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 421, 422, 423, 425, 426, 427, 429, 430, 431, 432, 433, 435, 437, 438, 439, 441, 442, 443, 444, 445, 446, 448, 449, 452, 453, 455, 457, 458, 459, 461, 462, 463, 464, 465, 466, 467], "module_wrapp": 401, "move_squeeze_after_relu": [63, 335], "mse": [271, 554], "mse_v2": [272, 554], "multipl": [534, 542, 555], "mx": 403, "mx_quant": [402, 403, 404], "mxnet": [537, 546, 553], "mxnet_dataload": 204, "mxnet_model": 240, "mxnet_util": [0, 1], "need": 496, "nets_factori": 241, "network": 544, "neural": [469, 486, 492, 494, 523, 533, 534, 537, 538, 544, 551, 556], "neural_compressor": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468], "new": [494, 495, 497, 554], "ninm": 178, "norm": 20, "note": [492, 550], "nxm": 179, "object": [245, 510, 542], "off": 548, "onli": [477, 488, 494, 549], "onnx": [86, 87, 88, 89, 90, 499, 534, 546, 547, 555], "onnx_graph": 87, "onnx_model": 242, "onnx_nod": 88, "onnx_pars": [248, 249], "onnx_schema": 89, "onnxrt": [537, 553], "onnxrt_dataload": 205, "onnxrt_profil": [255, 256, 257], "onnxrtadaptor": 495, "op": [21, 528], "oper": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 496, 497], "optim": [165, 425, 543, 544], "optimize_lay": 103, "optimize_qdq": [116, 371], "option": [464, 526], "orchestr": [538, 543], "other": [494, 534], "our": [490, 552], "overview": [470, 482, 491], "ox_util": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], "packag": [170, 176, 183, 489], "pad": 22, "paramet": 472, "parser": [247, 248, 249, 250, 251, 252, 253], "pattern": [175, 176, 177, 178, 179, 544], "pattern_analyz": 173, "pattern_detector": 143, "pattern_lock": 185, "per": [488, 552], "perform": 551, "platform": [494, 534, 551], "pledg": 490, "polici": [493, 554], "pool": 23, "pool2d": 297, "post": [481, 492, 538, 546], "post_hostconst_convert": [82, 353], "post_quantized_op_cs": [83, 354], "postprocess": 223, "pre_optim": [64, 336], "precis": [474, 481, 508, 538, 539, 548], "prepar": [489, 496, 551], 
"prerequisit": 534, "problem": [478, 492], "process": 554, "processor": 534, "profil": [246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261], "progress": 186, "prune": [188, 538, 544, 555], "pruner": [169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194], "pt2e": 476, "pt2e_export": 435, "pt2e_quant": [405, 406, 407, 408, 409], "ptq": 555, "public": [494, 522, 545], "pull": 491, "pure": 526, "pypi": 494, "python": [470, 523, 537], "pytorch": [465, 474, 475, 476, 477, 482, 488, 516, 517, 518, 526, 528, 537, 546, 547, 548, 553, 555], "pytorch_dataload": 206, "qat": [98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 555], "qdq": [91, 92, 93, 94, 108, 109, 110, 111, 112, 113, 114, 115, 116, 356, 357, 358, 359, 363, 364, 365, 366, 367, 368, 369, 370, 371, 555], "qlinear2qdq": 457, "qtensor": 426, "quant": [480, 552], "quantiz": [28, 139, 262, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 427, 437, 438, 439, 440, 441, 442, 450, 471, 472, 473, 475, 476, 477, 478, 479, 481, 484, 488, 492, 494, 496, 497, 511, 513, 514, 515, 516, 517, 518, 521, 525, 528, 534, 536, 538, 539, 541, 546, 547, 548, 549, 552, 555], "quantization_config": 452, "quantize_config": 100, "quantize_graph": [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378], "quantize_graph_bas": [117, 372], "quantize_graph_bn": [118, 373], "quantize_graph_common": [124, 379], "quantize_graph_concatv2": [119, 374], "quantize_graph_conv": [120, 375], "quantize_graph_for_intel_cpu": [121, 376], "quantize_graph_matmul": [122, 377], "quantize_graph_pool": [123, 378], "quantize_help": 101, "quantize_lay": [102, 103, 104, 105, 106], "quantize_layer_add": 104, "quantize_layer_bas": 105, "quantize_layer_bn": 106, "quantize_wrapp": 107, "queri": 495, "query_fw_cap": 496, "question": 529, "quick": 531, "random": [273, 554], "rang": 496, "recip": [536, 546], "recommend": 522, "reduc": 24, "refer": [473, 477, 488, 521, 522, 541, 544, 546, 549, 552], "reg": 189, "regular": 544, "releas": [527, 550], "remove_training_nod": [65, 337], "rename_batch_norm": [66, 338], "report": 493, "request": [474, 491, 539], "requir": 534, "rerange_quantized_concat": [132, 384], "resiz": 25, "respons": 490, "result": 251, "retrain": 544, "retrain_fre": 187, "rnn_convert": 84, "rtn": [430, 477], "rule": [475, 476, 477, 479, 522, 546], "run": 496, "runtim": [499, 534, 546, 547, 555], "sa_optim": 168, "sampl": [476, 496, 531], "sampler": 207, "save": 477, "save_load": [160, 408, 411, 415, 431], "scale_propag": [85, 355], "scaler": 286, "schedul": [190, 544], "scheme": [481, 488, 546], "scope": [490, 544], "section": [469, 556], "secur": [493, 526], "select": 494, "separable_conv2d": 298, "set": 522, "share_qdq_y_pattern": [94, 359], "shot": 543, "side": 477, "sigopt": [198, 551, 554], "singl": 542, "smooth": [475, 480, 481, 488, 552], "smooth_quant": [29, 149, 410, 411, 412, 413], "smooth_quant_calibr": 125, "smooth_quant_scal": 126, 
"smoother": [283, 284, 285, 286], "smoothquant": 552, "softwar": [474, 486, 534, 539], "sourc": 534, "space": 554, "spars": 544, "sparsiti": 544, "specif": 497, "specifi": [475, 476, 477, 479, 546], "split": 26, "split_shared_input": [67, 339], "stack": 486, "standard": 490, "start": [471, 472, 473, 474, 476, 477, 479, 484, 492, 494, 495, 520, 523, 525, 526, 530, 531, 537, 539, 541, 542, 543, 544, 546], "static": [476, 481, 488, 492, 546], "static_qu": [287, 288, 289, 414, 415, 416, 417], "statu": 491, "step": 491, "strategi": [197, 198, 199, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 512, 551, 554], "string": 522, "strip_equivalent_nod": [68, 340], "strip_unused_nod": [69, 341], "structur": 522, "style_transfer_dataset": 216, "submodul": [0, 4, 16, 37, 61, 72, 80, 86, 91, 96, 97, 99, 102, 115, 130, 136, 137, 148, 155, 158, 164, 167, 170, 172, 176, 183, 193, 197, 215, 219, 222, 226, 233, 237, 270, 276, 285, 287, 295, 300, 304, 309, 333, 344, 351, 356, 361, 362, 370, 382, 388, 394, 395, 400, 402, 407, 410, 414, 424, 428, 434, 440, 447, 451, 456, 460], "subpackag": [4, 72, 96, 97, 99, 136, 155, 170, 196, 220, 226, 270, 282, 290, 291, 304, 344, 361, 362, 394, 428, 436, 460], "summari": [483, 497], "support": [472, 474, 475, 477, 478, 481, 483, 489, 491, 492, 495, 497, 520, 521, 523, 525, 526, 528, 530, 533, 534, 537, 539, 540, 541, 542, 543, 544, 546, 547, 549, 552, 553], "switch_optim": [70, 342], "symbolic_trac": 144, "symmetr": 488, "system": 534, "templat": [281, 491], "tensor": [488, 552], "tensorflow": [32, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 479, 481, 482, 488, 494, 513, 514, 515, 526, 528, 534, 537, 546, 548, 553, 555], "tensorflow_dataload": 208, "tensorflow_model": 243, "tensorflow_pars": [252, 253], "tensorflow_profil": [259, 260, 261], "teq": [432, 477], "tf2onnx": 458, "tf2onnx_convert": 127, "tf2onnx_util": 90, "tf_criteria": 191, "tf_util": [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133], "through": [480, 534, 552, 555], "throughput": 483, "todo": 522, "token": 224, "topic": 470, "torch": [392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 478, 494, 500, 534, 555], "torch2onnx": 459, "torch_load": 140, "torch_model": 244, "torch_util": [134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145], "tpc": 534, "tpe": [199, 554], "trademark": 535, "train": [449, 481, 488, 492, 519, 526, 538, 544, 546], "transform": 
[221, 222, 223, 224, 225, 450, 451, 452, 489, 532, 553], "transform_graph": [128, 129, 130, 131, 132, 380, 381, 382, 383, 384], "tune": [479, 480, 488, 497, 546, 549, 552, 554], "tune_cfg": 496, "tuning_param": 156, "tuning_sampl": 277, "tuning_spac": 278, "tuning_struct": 279, "turn": 548, "two": 534, "type": [497, 522, 544], "unary_op": 27, "us": [480, 483, 494, 497, 523, 534, 537, 552], "usag": [472, 475, 476, 477, 480, 483, 489, 552, 554], "user": [470, 526, 532, 549], "util": [1, 30, 133, 141, 145, 157, 158, 159, 160, 161, 166, 192, 194, 257, 261, 275, 276, 277, 278, 279, 280, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 393, 398, 404, 409, 413, 417, 433, 443, 444, 445, 446, 447, 448, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 500], "v": 522, "v1": 532, "valid": [475, 534, 552, 555], "vendor": 534, "version": [468, 486], "vulner": 493, "wanda": [193, 194], "weight": [477, 488, 494, 549], "weight_correct": 150, "weight_onli": [31, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433], "weight_slim": 174, "weights_detail": 467, "what": 494, "wise": [477, 547], "without": 479, "woq": 549, "work": [482, 495, 546], "workflow": [485, 524], "x": [470, 538], "xe": 534, "yaml": [496, 526, 530]}}) \ No newline at end of file +Search.setIndex({"alltitles": {"2.0 API": [[501, "api"]], "2.X API User Guide": [[470, "x-api-user-guide"]], "2018 - 2020 (4)": [[545, "id5"]], "2021 (15)": [[545, "id4"]], "2022 (35)": [[545, "id3"]], "2023 (25)": [[545, "id2"]], "2024 (6)": [[545, "id1"]], "3.0 API": [[502, "api"]], "API Document Example": [[503, "api-document-example"]], "API List that Need to Implement": [[496, "api-list-that-need-to-implement"]], "API for TensorFlow": [[481, "api-for-tensorflow"]], "APIs": [[504, "apis"]], "AWQ": [[477, "awq"]], "Accuracy Aware Tuning": [[488, "accuracy-aware-tuning"], [546, "accuracy-aware-tuning"]], "Accuracy Criteria": [[554, "accuracy-criteria"]], "Accuracy-driven mixed precision": [[474, "accuracy-driven-mixed-precision"], [539, "accuracy-driven-mixed-precision"]], "Adaptor": [[495, "adaptor"], [498, "adaptor"]], "Adaptor Support Matrix": [[495, "adaptor-support-matrix"]], "Add quantize API according to tune_cfg": [[496, "add-quantize-api-according-to-tune-cfg"]], "Add query_fw_capability to Adaptor": [[496, "add-query-fw-capability-to-adaptor"]], "Additional Content": [[494, "additional-content"]], "Advanced Topics": [[470, "advanced-topics"]], "Algorithm: Auto-tuning of $\\alpha$.": [[552, "algorithm-auto-tuning-of-alpha"]], "Appendix": [[528, "appendix"]], "Architecture": [[485, "architecture"], [524, "architecture"], [533, "architecture"]], "Attributes": [[211, "attributes"], [281, "attributes"]], "Attribution": [[490, "attribution"]], "Auto": [[554, "auto"]], "Auto-tune the alpha for each layer/block": [[552, "auto-tune-the-alpha-for-each-layer-block"]], "Auto-tune the alpha for the entire model": [[552, "auto-tune-the-alpha-for-the-entire-model"]], "AutoRound": [[477, "autoround"]], "AutoTune": [[482, "autotune"]], "Autotune API": [[478, "autotune-api"]], "Backend and Device": [[481, "backend-and-device"]], 
"Background": [[495, "background"], [522, "background"]], "Basic": [[554, "basic"]], "Bayesian": [[554, "bayesian"]], "Benchmark": [[483, "benchmark"], [505, "benchmark"], [538, "benchmark"]], "Benchmark Support Matrix": [[520, "benchmark-support-matrix"]], "Benchmarking": [[520, "benchmarking"]], "Benefit of SigOpt Strategy": [[551, "benefit-of-sigopt-strategy"]], "Build Custom Dataloader with Python API": [[523, "build-custom-dataloader-with-python-api"]], "Build Custom Metric with Python API": [[537, "build-custom-metric-with-python-api"]], "Built-in transform/dataset/metric APIs": [[532, "built-in-transform-dataset-metric-apis"]], "Calculate the data range and generate quantized model": [[496, "calculate-the-data-range-and-generate-quantized-model"]], "Calibration Algorithms": [[521, "calibration-algorithms"]], "Calibration Algorithms in Quantization": [[521, "calibration-algorithms-in-quantization"]], "Capability": [[495, "capability"]], "Citation": [[535, "citation"]], "Classes": [[1, "classes"], [2, "classes"], [3, "classes"], [5, "classes"], [6, "classes"], [7, "classes"], [8, "classes"], [9, "classes"], [10, "classes"], [11, "classes"], [12, "classes"], [13, "classes"], [14, "classes"], [15, "classes"], [17, "classes"], [18, "classes"], [19, "classes"], [20, "classes"], [21, "classes"], [22, "classes"], [23, "classes"], [24, "classes"], [25, "classes"], [26, "classes"], [27, "classes"], [28, "classes"], [29, "classes"], [30, "classes"], [32, "classes"], [33, "classes"], [34, "classes"], [35, "classes"], [36, "classes"], [38, "classes"], [39, "classes"], [40, "classes"], [41, "classes"], [42, "classes"], [43, "classes"], [44, "classes"], [45, "classes"], [46, "classes"], [47, "classes"], [48, "classes"], [49, "classes"], [50, "classes"], [51, "classes"], [52, "classes"], [53, "classes"], [54, "classes"], [55, "classes"], [56, "classes"], [57, "classes"], [58, "classes"], [59, "classes"], [60, "classes"], [62, "classes"], [63, "classes"], [64, "classes"], [65, "classes"], [66, "classes"], [67, "classes"], [68, "classes"], [69, "classes"], [70, "classes"], [71, "classes"], [73, "classes"], [74, "classes"], [75, "classes"], [76, "classes"], [77, "classes"], [78, "classes"], [79, "classes"], [81, "classes"], [82, "classes"], [83, "classes"], [84, "classes"], [85, "classes"], [87, "classes"], [88, "classes"], [89, "classes"], [90, "classes"], [92, "classes"], [93, "classes"], [94, "classes"], [95, "classes"], [98, "classes"], [100, "classes"], [104, "classes"], [105, "classes"], [106, "classes"], [107, "classes"], [108, "classes"], [109, "classes"], [110, "classes"], [111, "classes"], [112, "classes"], [113, "classes"], [114, "classes"], [116, "classes"], [117, "classes"], [118, "classes"], [119, "classes"], [120, "classes"], [121, "classes"], [122, "classes"], [123, "classes"], [124, "classes"], [125, "classes"], [126, "classes"], [127, "classes"], [128, "classes"], [129, "classes"], [131, "classes"], [132, "classes"], [134, "classes"], [135, "classes"], [139, "classes"], [142, "classes"], [143, "classes"], [146, "classes"], [147, "classes"], [149, "classes"], [150, "classes"], [152, "classes"], [153, "classes"], [156, "classes"], [157, "classes"], [159, "classes"], [161, "classes"], [162, "classes"], [163, "classes"], [165, "classes"], [169, "classes"], [173, "classes"], [174, "classes"], [175, "classes"], [177, "classes"], [178, "classes"], [179, "classes"], [180, "classes"], [181, "classes"], [182, "classes"], [184, "classes"], [185, "classes"], [186, "classes"], [187, "classes"], 
[188, "classes"], [189, "classes"], [190, "classes"], [191, "classes"], [195, "classes"], [198, "classes"], [199, "classes"], [200, "classes"], [201, "classes"], [202, "classes"], [203, "classes"], [204, "classes"], [205, "classes"], [206, "classes"], [207, "classes"], [208, "classes"], [209, "classes"], [210, "classes"], [211, "classes"], [212, "classes"], [213, "classes"], [214, "classes"], [216, "classes"], [217, "classes"], [218, "classes"], [221, "classes"], [223, "classes"], [224, "classes"], [225, "classes"], [227, "classes"], [230, "classes"], [234, "classes"], [236, "classes"], [238, "classes"], [239, "classes"], [240, "classes"], [241, "classes"], [242, "classes"], [243, "classes"], [244, "classes"], [245, "classes"], [247, "classes"], [248, "classes"], [249, "classes"], [250, "classes"], [251, "classes"], [252, "classes"], [253, "classes"], [254, "classes"], [255, "classes"], [256, "classes"], [258, "classes"], [259, "classes"], [260, "classes"], [263, "classes"], [264, "classes"], [265, "classes"], [266, "classes"], [267, "classes"], [268, "classes"], [269, "classes"], [271, "classes"], [272, "classes"], [273, "classes"], [274, "classes"], [277, "classes"], [278, "classes"], [279, "classes"], [280, "classes"], [281, "classes"], [283, "classes"], [284, "classes"], [286, "classes"], [288, "classes"], [289, "classes"], [292, "classes"], [293, "classes"], [294, "classes"], [297, "classes"], [298, "classes"], [299, "classes"], [303, "classes"], [306, "classes"], [307, "classes"], [308, "classes"], [310, "classes"], [311, "classes"], [312, "classes"], [313, "classes"], [314, "classes"], [315, "classes"], [316, "classes"], [317, "classes"], [318, "classes"], [319, "classes"], [320, "classes"], [321, "classes"], [322, "classes"], [323, "classes"], [324, "classes"], [325, "classes"], [326, "classes"], [327, "classes"], [328, "classes"], [329, "classes"], [330, "classes"], [331, "classes"], [332, "classes"], [334, "classes"], [335, "classes"], [336, "classes"], [337, "classes"], [338, "classes"], [339, "classes"], [340, "classes"], [341, "classes"], [342, "classes"], [343, "classes"], [345, "classes"], [346, "classes"], [347, "classes"], [348, "classes"], [349, "classes"], [350, "classes"], [352, "classes"], [353, "classes"], [354, "classes"], [355, "classes"], [357, "classes"], [358, "classes"], [359, "classes"], [360, "classes"], [363, "classes"], [364, "classes"], [365, "classes"], [366, "classes"], [367, "classes"], [368, "classes"], [369, "classes"], [371, "classes"], [372, "classes"], [373, "classes"], [374, "classes"], [375, "classes"], [376, "classes"], [377, "classes"], [378, "classes"], [379, "classes"], [380, "classes"], [381, "classes"], [383, "classes"], [384, "classes"], [387, "classes"], [389, "classes"], [390, "classes"], [391, "classes"], [392, "classes"], [398, "classes"], [399, "classes"], [401, "classes"], [403, "classes"], [404, "classes"], [405, "classes"], [406, "classes"], [412, "classes"], [413, "classes"], [416, "classes"], [417, "classes"], [418, "classes"], [419, "classes"], [420, "classes"], [421, "classes"], [422, "classes"], [423, "classes"], [426, "classes"], [427, "classes"], [429, "classes"], [430, "classes"], [431, "classes"], [432, "classes"], [433, "classes"], [439, "classes"], [443, "classes"], [445, "classes"], [449, "classes"], [452, "classes"], [453, "classes"], [461, "classes"], [462, "classes"], [463, "classes"], [464, "classes"], [466, "classes"], [467, "classes"]], "Code Migration from Intel Neural Compressor 1.X to Intel Neural Compressor 
2.X": [[538, "code-migration-from-intel-neural-compressor-1-x-to-intel-neural-compressor-2-x"]], "Comments": [[522, "comments"]], "Common Build Issues": [[529, "common-build-issues"]], "Common Problem": [[492, "common-problem"]], "Common Problems": [[478, "common-problems"]], "Common arguments": [[477, "common-arguments"]], "Communication": [[494, "communication"]], "Compression": [[506, "compression"]], "Config": [[507, "config"]], "Config Multiple Objectives": [[542, "config-multiple-objectives"]], "Config Single Objective": [[542, "config-single-objective"]], "Conservative Tuning": [[554, "conservative-tuning"]], "Contribution Guidelines": [[491, "contribution-guidelines"]], "Contributor Covenant Code of Conduct": [[490, "contributor-covenant-code-of-conduct"], [491, "contributor-covenant-code-of-conduct"]], "Create Pull Request": [[491, "create-pull-request"]], "Customize a New Tuning Strategy": [[554, "customize-a-new-tuning-strategy"]], "DataLoader": [[523, "dataloader"]], "Define the Quantization Ability of the Specific Operator": [[497, "define-the-quantization-ability-of-the-specific-operator"]], "Demo Usage": [[472, "demo-usage"]], "Demo usage": [[483, "demo-usage"]], "Design": [[485, "design"], [524, "design"], [554, "design"], [554, "id1"], [554, "id3"], [554, "id5"], [554, "id7"], [554, "id9"], [554, "id11"], [554, "id13"], [554, "id15"], [554, "id17"], [554, "id19"], [554, "id21"]], "Design the framework YAML": [[496, "design-the-framework-yaml"]], "Details": [[492, "details"]], "Determining the alpha through auto-tuning": [[480, "determining-the-alpha-through-auto-tuning"], [552, "determining-the-alpha-through-auto-tuning"]], "Distillation": [[538, "distillation"]], "Distillation for Quantization": [[525, "distillation-for-quantization"]], "Distillation for Quantization Support Matrix": [[525, "distillation-for-quantization-support-matrix"]], "Distributed Training and Inference (Evaluation)": [[526, "distributed-training-and-inference-evaluation"]], "Distributed Tuning": [[554, "distributed-tuning"]], "Documentation": [[494, "documentation"]], "Dump Throughput and Latency Summary": [[483, "dump-throughput-and-latency-summary"]], "During quantization mixed precision": [[539, "during-quantization-mixed-precision"]], "Dynamic Quantization": [[471, "dynamic-quantization"], [488, "dynamic-quantization"], [492, "dynamic-quantization"]], "Efficient Usage on Client-Side": [[477, "efficient-usage-on-client-side"]], "Enforcement": [[490, "enforcement"]], "Engineering": [[552, "engineering"]], "Example": [[537, "example"], [542, "example"]], "Example List": [[527, "example-list"]], "Example of Adding a New Backend Support": [[495, "example-of-adding-a-new-backend-support"]], "Examples": [[471, "examples"], [472, "examples"], [473, "examples"], [474, "examples"], [477, "examples"], [479, "examples"], [480, "examples"], [489, "examples"], [492, "examples"], [520, "examples"], [523, "examples"], [525, "examples"], [526, "examples"], [527, "examples"], [528, "examples"], [538, "examples"], [539, "examples"], [540, "examples"], [541, "examples"], [543, "examples"], [544, "examples"], [546, "examples"], [547, "examples"], [549, "examples"]], "Exceptions": [[138, "exceptions"], [397, "exceptions"]], "Exhaustive": [[554, "exhaustive"]], "Exit Policy": [[554, "exit-policy"]], "Export": [[528, "export"]], "Export Compressed Model": [[549, "export-compressed-model"]], "FP32 Model Export": [[528, "fp32-model-export"], [528, "id1"]], "FP8 Quantization": [[472, "fp8-quantization"], [494, 
"fp8-quantization"]], "FX": [[492, "fx"]], "FX Mode Support Matrix in Neural Compressor": [[492, "fx-mode-support-matrix-in-neural-compressor"]], "Feature Matrix": [[531, "feature-matrix"]], "Fixed Alpha": [[475, "fixed-alpha"]], "Folder structure": [[522, "folder-structure"]], "Framework YAML Configuration Files": [[530, "framework-yaml-configuration-files"]], "Frequently Asked Questions": [[529, "frequently-asked-questions"]], "Full Publications/Events (85)": [[545, "full-publications-events-85"]], "Functions": [[1, "functions"], [3, "functions"], [21, "functions"], [29, "functions"], [30, "functions"], [31, "functions"], [52, "functions"], [53, "functions"], [55, "functions"], [89, "functions"], [90, "functions"], [101, "functions"], [103, "functions"], [133, "functions"], [134, "functions"], [135, "functions"], [140, "functions"], [141, "functions"], [144, "functions"], [145, "functions"], [146, "functions"], [151, "functions"], [152, "functions"], [153, "functions"], [154, "functions"], [160, "functions"], [161, "functions"], [163, "functions"], [165, "functions"], [166, "functions"], [169, "functions"], [170, "functions"], [171, "functions"], [173, "functions"], [175, "functions"], [176, "functions"], [180, "functions"], [183, "functions"], [188, "functions"], [189, "functions"], [190, "functions"], [191, "functions"], [192, "functions"], [194, "functions"], [201, "functions"], [202, "functions"], [209, "functions"], [211, "functions"], [218, "functions"], [224, "functions"], [225, "functions"], [227, "functions"], [228, "functions"], [230, "functions"], [231, "functions"], [232, "functions"], [234, "functions"], [235, "functions"], [239, "functions"], [243, "functions"], [245, "functions"], [257, "functions"], [261, "functions"], [262, "functions"], [266, "functions"], [274, "functions"], [278, "functions"], [280, "functions"], [281, "functions"], [292, "functions"], [293, "functions"], [294, "functions"], [297, "functions"], [298, "functions"], [299, "functions"], [301, "functions"], [302, "functions"], [303, "functions"], [305, "functions"], [324, "functions"], [325, "functions"], [327, "functions"], [385, "functions"], [387, "functions"], [390, "functions"], [391, "functions"], [396, "functions"], [398, "functions"], [404, "functions"], [406, "functions"], [408, "functions"], [409, "functions"], [411, "functions"], [412, "functions"], [413, "functions"], [415, "functions"], [417, "functions"], [418, "functions"], [420, "functions"], [425, "functions"], [427, "functions"], [431, "functions"], [433, "functions"], [435, "functions"], [437, "functions"], [438, "functions"], [439, "functions"], [441, "functions"], [442, "functions"], [443, "functions"], [444, "functions"], [446, "functions"], [448, "functions"], [449, "functions"], [455, "functions"], [457, "functions"], [458, "functions"], [459, "functions"], [462, "functions"], [463, "functions"], [465, "functions"], [466, "functions"]], "GPTQ": [[477, "gptq"]], "General Use Cases": [[483, "general-use-cases"]], "Get Start with FP8 Quantization": [[472, "get-start-with-fp8-quantization"]], "Get Started": [[476, "get-started"], [477, "get-started"], [479, "get-started"], [484, "get-started"], [492, "get-started"], [546, "get-started"]], "Get Started with Adaptor API": [[495, "get-started-with-adaptor-api"]], "Get Started with Benchmark API": [[520, "get-started-with-benchmark-api"]], "Get Started with DataLoader": [[523, "get-started-with-dataloader"]], "Get Started with Distillation for Quantization API": [[525, 
"get-started-with-distillation-for-quantization-api"]], "Get Started with Distributed Training and Inference API": [[526, "get-started-with-distributed-training-and-inference-api"]], "Get Started with Metric": [[537, "get-started-with-metric"]], "Get Started with Microscaling Quantization API": [[473, "get-started-with-microscaling-quantization-api"], [541, "get-started-with-microscaling-quantization-api"]], "Get Started with Mixed Precision API": [[539, "get-started-with-mixed-precision-api"]], "Get Started with Objective API": [[542, "get-started-with-objective-api"]], "Get Started with Orchestration API": [[543, "get-started-with-orchestration-api"]], "Get Started with Pruning API": [[544, "get-started-with-pruning-api"]], "Get Started with autotune API": [[474, "get-started-with-autotune-api"]], "Get started with Framework YAML Files": [[530, "get-started-with-framework-yaml-files"]], "Getting Started": [[494, "getting-started"], [531, "getting-started"]], "Getting Started with Dynamic Quantization": [[471, "getting-started-with-dynamic-quantization"]], "HAWQ_V2": [[554, "hawq-v2"]], "HQQ": [[477, "hqq"]], "Hardware and Software requests for BF16": [[474, "hardware-and-software-requests-for-bf16"], [539, "hardware-and-software-requests-for-bf16"]], "Hardware and Software requests for FP16": [[474, "hardware-and-software-requests-for-fp16"], [539, "hardware-and-software-requests-for-fp16"]], "Horovodrun Execution": [[526, "horovodrun-execution"]], "How it Works": [[482, "how-it-works"]], "How to Add An Adaptor": [[496, "how-to-add-an-adaptor"]], "How to Support New Data Type, Like Int4, with a Few Line Changes": [[497, "how-to-support-new-data-type-like-int4-with-a-few-line-changes"]], "INC Coding Conventions": [[522, "inc-coding-conventions"]], "INT8 Model Export": [[528, "int8-model-export"], [528, "id2"]], "Implement ONNXRTAdaptor Class": [[495, "implement-onnxrtadaptor-class"]], "Imports": [[522, "imports"]], "Incompatible Changes": [[550, "incompatible-changes"]], "Incompatible changes between v1.2 and v1.1": [[532, "incompatible-changes-between-v1-2-and-v1-1"]], "Infrastructure of Intel\u00ae Neural Compressor": [[533, "infrastructure-of-intel-neural-compressor"]], "Install Framework": [[494, "install-framework"], [534, "install-framework"]], "Install from AI Kit": [[534, "install-from-ai-kit"]], "Install from Binary": [[534, "install-from-binary"]], "Install from Source": [[534, "install-from-source"]], "Install from pypi": [[494, "install-from-pypi"]], "Install tensorflow": [[494, "install-tensorflow"], [534, "install-tensorflow"]], "Install torch for CPU": [[494, "install-torch-for-cpu"], [534, "install-torch-for-cpu"]], "Install torch for other platform": [[494, "install-torch-for-other-platform"], [534, "install-torch-for-other-platform"]], "Install torch/intel_extension_for_pytorch for Intel GPU": [[494, "install-torch-intel-extension-for-pytorch-for-intel-gpu"], [534, "install-torch-intel-extension-for-pytorch-for-intel-gpu"]], "Installation": [[494, "installation"], [534, "installation"], [534, "id1"]], "Intel\u00ae Neural Compressor": [[494, "intel-neural-compressor"]], "Intel\u00ae Neural Compressor Documentation": [[469, "intel-neural-compressor-documentation"], [556, "intel-neural-compressor-documentation"]], "Intel\u00ae Neural Compressor quantized ONNX models support multiple hardware vendors through ONNX Runtime:": [[534, "intel-neural-compressor-quantized-onnx-models-support-multiple-hardware-vendors-through-onnx-runtime"]], "Intel\u00ae Neural Compressor supports 
CPUs based on Intel 64 architecture or compatible processors:": [[534, "intel-neural-compressor-supports-cpus-based-on-intel-64-architecture-or-compatible-processors"]], "Intel\u00ae Neural Compressor supports GPUs built on Intel\u2019s Xe architecture:": [[534, "intel-neural-compressor-supports-gpus-built-on-intel-s-xe-architecture"]], "Intel\u00ae Neural Compressor supports HPUs based on heterogeneous architecture with two compute engines (MME and TPC):": [[534, "intel-neural-compressor-supports-hpus-based-on-heterogeneous-architecture-with-two-compute-engines-mme-and-tpc"]], "Introduction": [[471, "introduction"], [472, "introduction"], [473, "introduction"], [474, "introduction"], [475, "introduction"], [476, "introduction"], [477, "introduction"], [478, "introduction"], [479, "introduction"], [480, "introduction"], [481, "introduction"], [483, "introduction"], [484, "introduction"], [488, "introduction"], [489, "introduction"], [492, "introduction"], [495, "introduction"], [496, "introduction"], [497, "introduction"], [520, "introduction"], [521, "introduction"], [523, "introduction"], [525, "introduction"], [526, "introduction"], [528, "introduction"], [530, "introduction"], [533, "introduction"], [537, "introduction"], [539, "introduction"], [540, "introduction"], [541, "introduction"], [542, "introduction"], [543, "introduction"], [544, "introduction"], [547, "introduction"], [549, "introduction"], [551, "introduction"], [552, "introduction"], [553, "introduction"], [554, "introduction"]], "Invoke the Operator Kernel According to the Tuning Configuration": [[497, "invoke-the-operator-kernel-according-to-the-tuning-configuration"]], "Issue 1:": [[529, "issue-1"]], "Issue 2:": [[529, "issue-2"]], "Issue 3:": [[529, "issue-3"]], "Issue 4:": [[529, "issue-4"]], "Issue 5:": [[529, "issue-5"]], "Keras Models with keras 2.15.1": [[555, "keras-models-with-keras-2-15-1"]], "Known Issues": [[550, "known-issues"]], "LLMs Quantization Recipes": [[536, "llms-quantization-recipes"]], "Large Language Model Pruning": [[544, "large-language-model-pruning"]], "Large Language Models Accuracy": [[536, "large-language-models-accuracy"]], "Large Language Models Recipes": [[536, "large-language-models-recipes"]], "Layer Wise Quantization": [[477, "layer-wise-quantization"]], "Layer Wise Quantization (LWQ)": [[547, "layer-wise-quantization-lwq"]], "Legal Information": [[535, "legal-information"]], "License": [[535, "license"]], "Load API": [[478, "load-api"]], "Logger": [[522, "logger"]], "MSE": [[554, "mse"]], "MSE_V2": [[554, "mse-v2"]], "MXNet": [[537, "mxnet"], [553, "mxnet"]], "Matmul quantization example": [[488, "matmul-quantization-example"], [552, "matmul-quantization-example"]], "Metrics": [[537, "metrics"]], "Microscaling Quantization": [[473, "microscaling-quantization"], [541, "microscaling-quantization"]], "Mix Precision": [[508, "mix-precision"], [538, "mix-precision"]], "Mixed Precision": [[481, "mixed-precision"], [539, "mixed-precision"]], "Mixed Precision Support Matrix": [[474, "mixed-precision-support-matrix"], [539, "mixed-precision-support-matrix"]], "Model": [[509, "model"], [540, "model"]], "Model Examples": [[476, "model-examples"]], "Model Examples with PT2E": [[476, "model-examples-with-pt2e"]], "Model Quantization": [[538, "model-quantization"]], "Module Contents": [[1, "module-contents"], [2, "module-contents"], [3, "module-contents"], [5, "module-contents"], [6, "module-contents"], [7, "module-contents"], [8, "module-contents"], [9, "module-contents"], [10, 
"module-contents"], [11, "module-contents"], [12, "module-contents"], [13, "module-contents"], [14, "module-contents"], [15, "module-contents"], [17, "module-contents"], [18, "module-contents"], [19, "module-contents"], [20, "module-contents"], [21, "module-contents"], [22, "module-contents"], [23, "module-contents"], [24, "module-contents"], [25, "module-contents"], [26, "module-contents"], [27, "module-contents"], [28, "module-contents"], [29, "module-contents"], [30, "module-contents"], [31, "module-contents"], [32, "module-contents"], [33, "module-contents"], [34, "module-contents"], [35, "module-contents"], [36, "module-contents"], [38, "module-contents"], [39, "module-contents"], [40, "module-contents"], [41, "module-contents"], [42, "module-contents"], [43, "module-contents"], [44, "module-contents"], [45, "module-contents"], [46, "module-contents"], [47, "module-contents"], [48, "module-contents"], [49, "module-contents"], [50, "module-contents"], [51, "module-contents"], [52, "module-contents"], [53, "module-contents"], [54, "module-contents"], [55, "module-contents"], [56, "module-contents"], [57, "module-contents"], [58, "module-contents"], [59, "module-contents"], [60, "module-contents"], [62, "module-contents"], [63, "module-contents"], [64, "module-contents"], [65, "module-contents"], [66, "module-contents"], [67, "module-contents"], [68, "module-contents"], [69, "module-contents"], [70, "module-contents"], [71, "module-contents"], [73, "module-contents"], [74, "module-contents"], [75, "module-contents"], [76, "module-contents"], [77, "module-contents"], [78, "module-contents"], [79, "module-contents"], [81, "module-contents"], [82, "module-contents"], [83, "module-contents"], [84, "module-contents"], [85, "module-contents"], [87, "module-contents"], [88, "module-contents"], [89, "module-contents"], [90, "module-contents"], [92, "module-contents"], [93, "module-contents"], [94, "module-contents"], [95, "module-contents"], [98, "module-contents"], [100, "module-contents"], [101, "module-contents"], [103, "module-contents"], [104, "module-contents"], [105, "module-contents"], [106, "module-contents"], [107, "module-contents"], [108, "module-contents"], [109, "module-contents"], [110, "module-contents"], [111, "module-contents"], [112, "module-contents"], [113, "module-contents"], [114, "module-contents"], [116, "module-contents"], [117, "module-contents"], [118, "module-contents"], [119, "module-contents"], [120, "module-contents"], [121, "module-contents"], [122, "module-contents"], [123, "module-contents"], [124, "module-contents"], [125, "module-contents"], [126, "module-contents"], [127, "module-contents"], [128, "module-contents"], [129, "module-contents"], [131, "module-contents"], [132, "module-contents"], [133, "module-contents"], [134, "module-contents"], [135, "module-contents"], [138, "module-contents"], [139, "module-contents"], [140, "module-contents"], [141, "module-contents"], [142, "module-contents"], [143, "module-contents"], [144, "module-contents"], [145, "module-contents"], [146, "module-contents"], [147, "module-contents"], [149, "module-contents"], [150, "module-contents"], [151, "module-contents"], [152, "module-contents"], [153, "module-contents"], [154, "module-contents"], [156, "module-contents"], [157, "module-contents"], [159, "module-contents"], [160, "module-contents"], [161, "module-contents"], [162, "module-contents"], [163, "module-contents"], [165, "module-contents"], [166, "module-contents"], [169, "module-contents"], [171, "module-contents"], 
[173, "module-contents"], [174, "module-contents"], [175, "module-contents"], [177, "module-contents"], [178, "module-contents"], [179, "module-contents"], [180, "module-contents"], [181, "module-contents"], [182, "module-contents"], [184, "module-contents"], [185, "module-contents"], [186, "module-contents"], [187, "module-contents"], [188, "module-contents"], [189, "module-contents"], [190, "module-contents"], [191, "module-contents"], [192, "module-contents"], [194, "module-contents"], [195, "module-contents"], [198, "module-contents"], [199, "module-contents"], [200, "module-contents"], [201, "module-contents"], [202, "module-contents"], [203, "module-contents"], [204, "module-contents"], [205, "module-contents"], [206, "module-contents"], [207, "module-contents"], [208, "module-contents"], [209, "module-contents"], [210, "module-contents"], [211, "module-contents"], [212, "module-contents"], [213, "module-contents"], [214, "module-contents"], [216, "module-contents"], [217, "module-contents"], [218, "module-contents"], [221, "module-contents"], [223, "module-contents"], [224, "module-contents"], [225, "module-contents"], [227, "module-contents"], [228, "module-contents"], [230, "module-contents"], [231, "module-contents"], [232, "module-contents"], [234, "module-contents"], [235, "module-contents"], [236, "module-contents"], [238, "module-contents"], [239, "module-contents"], [240, "module-contents"], [241, "module-contents"], [242, "module-contents"], [243, "module-contents"], [244, "module-contents"], [245, "module-contents"], [247, "module-contents"], [248, "module-contents"], [249, "module-contents"], [250, "module-contents"], [251, "module-contents"], [252, "module-contents"], [253, "module-contents"], [254, "module-contents"], [255, "module-contents"], [256, "module-contents"], [257, "module-contents"], [258, "module-contents"], [259, "module-contents"], [260, "module-contents"], [261, "module-contents"], [262, "module-contents"], [263, "module-contents"], [264, "module-contents"], [265, "module-contents"], [266, "module-contents"], [267, "module-contents"], [268, "module-contents"], [269, "module-contents"], [271, "module-contents"], [272, "module-contents"], [273, "module-contents"], [274, "module-contents"], [277, "module-contents"], [278, "module-contents"], [279, "module-contents"], [280, "module-contents"], [281, "module-contents"], [283, "module-contents"], [284, "module-contents"], [286, "module-contents"], [288, "module-contents"], [289, "module-contents"], [292, "module-contents"], [293, "module-contents"], [294, "module-contents"], [297, "module-contents"], [298, "module-contents"], [299, "module-contents"], [301, "module-contents"], [302, "module-contents"], [303, "module-contents"], [305, "module-contents"], [306, "module-contents"], [307, "module-contents"], [308, "module-contents"], [310, "module-contents"], [311, "module-contents"], [312, "module-contents"], [313, "module-contents"], [314, "module-contents"], [315, "module-contents"], [316, "module-contents"], [317, "module-contents"], [318, "module-contents"], [319, "module-contents"], [320, "module-contents"], [321, "module-contents"], [322, "module-contents"], [323, "module-contents"], [324, "module-contents"], [325, "module-contents"], [326, "module-contents"], [327, "module-contents"], [328, "module-contents"], [329, "module-contents"], [330, "module-contents"], [331, "module-contents"], [332, "module-contents"], [334, "module-contents"], [335, "module-contents"], [336, "module-contents"], [337, 
"module-contents"], [338, "module-contents"], [339, "module-contents"], [340, "module-contents"], [341, "module-contents"], [342, "module-contents"], [343, "module-contents"], [345, "module-contents"], [346, "module-contents"], [347, "module-contents"], [348, "module-contents"], [349, "module-contents"], [350, "module-contents"], [352, "module-contents"], [353, "module-contents"], [354, "module-contents"], [355, "module-contents"], [357, "module-contents"], [358, "module-contents"], [359, "module-contents"], [360, "module-contents"], [363, "module-contents"], [364, "module-contents"], [365, "module-contents"], [366, "module-contents"], [367, "module-contents"], [368, "module-contents"], [369, "module-contents"], [371, "module-contents"], [372, "module-contents"], [373, "module-contents"], [374, "module-contents"], [375, "module-contents"], [376, "module-contents"], [377, "module-contents"], [378, "module-contents"], [379, "module-contents"], [380, "module-contents"], [381, "module-contents"], [383, "module-contents"], [384, "module-contents"], [385, "module-contents"], [387, "module-contents"], [389, "module-contents"], [390, "module-contents"], [391, "module-contents"], [392, "module-contents"], [396, "module-contents"], [397, "module-contents"], [398, "module-contents"], [399, "module-contents"], [401, "module-contents"], [403, "module-contents"], [404, "module-contents"], [405, "module-contents"], [406, "module-contents"], [408, "module-contents"], [409, "module-contents"], [411, "module-contents"], [412, "module-contents"], [413, "module-contents"], [415, "module-contents"], [416, "module-contents"], [417, "module-contents"], [418, "module-contents"], [419, "module-contents"], [420, "module-contents"], [421, "module-contents"], [422, "module-contents"], [423, "module-contents"], [425, "module-contents"], [426, "module-contents"], [427, "module-contents"], [429, "module-contents"], [430, "module-contents"], [431, "module-contents"], [432, "module-contents"], [433, "module-contents"], [435, "module-contents"], [437, "module-contents"], [438, "module-contents"], [439, "module-contents"], [441, "module-contents"], [442, "module-contents"], [443, "module-contents"], [444, "module-contents"], [445, "module-contents"], [446, "module-contents"], [448, "module-contents"], [449, "module-contents"], [452, "module-contents"], [453, "module-contents"], [455, "module-contents"], [457, "module-contents"], [458, "module-contents"], [459, "module-contents"], [461, "module-contents"], [462, "module-contents"], [463, "module-contents"], [464, "module-contents"], [465, "module-contents"], [466, "module-contents"], [467, "module-contents"]], "Multiple Objectives": [[542, "multiple-objectives"]], "Neural Compressor Configuration": [[551, "neural-compressor-configuration"]], "Neural Network Pruning": [[544, "neural-network-pruning"]], "Note": [[492, "note"]], "ONNX Models with ONNX Runtime 1.18.1": [[555, "onnx-models-with-onnx-runtime-1-18-1"]], "ONNX Runtime": [[499, "onnx-runtime"]], "ONNX Runtime framework example": [[547, "onnx-runtime-framework-example"]], "ONNXRT": [[537, "onnxrt"], [553, "onnxrt"]], "Objective": [[510, "objective"], [542, "objective"]], "Objective Support Matrix": [[542, "objective-support-matrix"]], "One-shot": [[543, "one-shot"]], "Optimization Orchestration": [[543, "optimization-orchestration"]], "Option 1: Pure Yaml Configuration": [[526, "option-1-pure-yaml-configuration"]], "Option 2: User Defined Training Function": [[526, "option-2-user-defined-training-function"]], 
"Orchestration": [[538, "orchestration"]], "Orchestration Support Matrix": [[543, "orchestration-support-matrix"]], "Our Pledge": [[490, "our-pledge"]], "Our Responsibilities": [[490, "our-responsibilities"]], "Our Standards": [[490, "our-standards"]], "Our enhancement:": [[552, "our-enhancement"]], "Overview": [[470, "overview"], [482, "overview"]], "Package Contents": [[170, "package-contents"], [176, "package-contents"], [183, "package-contents"]], "Per-channel example": [[488, "per-channel-example"], [552, "per-channel-example"]], "Per-channel limitation": [[488, "per-channel-limitation"], [552, "per-channel-limitation"]], "Per-tensor & Per-channel": [[488, "per-tensor-per-channel"], [552, "per-tensor-per-channel"]], "Per-tensor example": [[488, "per-tensor-example"], [552, "per-tensor-example"]], "Performance": [[551, "performance"]], "Performance Comparison of Different Strategies": [[551, "performance-comparison-of-different-strategies"]], "Post Training Dynamic Quantization": [[492, "post-training-dynamic-quantization"], [546, "post-training-dynamic-quantization"]], "Post Training Quantization": [[546, "post-training-quantization"]], "Post Training Static Quantization": [[481, "post-training-static-quantization"], [492, "post-training-static-quantization"], [546, "post-training-static-quantization"]], "Post-training Quantization": [[538, "post-training-quantization"]], "Preparation": [[551, "preparation"]], "Prepare Dependency Packages": [[489, "prepare-dependency-packages"]], "Prepare calibration model from fp32 graph": [[496, "prepare-calibration-model-from-fp32-graph"]], "Prerequisites": [[534, "prerequisites"]], "Pruning": [[538, "pruning"], [544, "pruning"]], "Pruning Criteria": [[544, "pruning-criteria"]], "Pruning Patterns": [[544, "pruning-patterns"]], "Pruning Schedules": [[544, "pruning-schedules"]], "Pruning Scope": [[544, "pruning-scope"]], "Pruning Support Matrix": [[544, "pruning-support-matrix"]], "Pruning Types": [[544, "pruning-types"]], "Pruning with Hyperparameter Optimization": [[544, "pruning-with-hyperparameter-optimization"]], "Public and Internal Interfaces": [[522, "public-and-internal-interfaces"]], "Pull Request Acceptance Criteria": [[491, "pull-request-acceptance-criteria"]], "Pull Request Checklist": [[491, "pull-request-checklist"]], "Pull Request Status Checks Overview": [[491, "pull-request-status-checks-overview"]], "Pull Request Template": [[491, "pull-request-template"]], "PyTorch": [[537, "pytorch"], [548, "pytorch"]], "PyTorch Examples:": [[526, "pytorch-examples"]], "PyTorch Mixed Precision": [[474, "pytorch-mixed-precision"]], "PyTorch Model": [[528, "pytorch-model"]], "PyTorch Models with Torch 2.3.0+cpu in IPEX Mode": [[555, "pytorch-models-with-torch-2-3-0-cpu-in-ipex-mode"]], "PyTorch Models with Torch 2.3.0+cpu in PTQ Mode": [[555, "pytorch-models-with-torch-2-3-0-cpu-in-ptq-mode"]], "PyTorch Models with Torch 2.3.0+cpu in QAT Mode": [[555, "pytorch-models-with-torch-2-3-0-cpu-in-qat-mode"]], "PyTorch Smooth Quantization": [[475, "pytorch-smooth-quantization"]], "PyTorch Static Quantization": [[476, "pytorch-static-quantization"]], "PyTorch Weight Only Quantization": [[477, "pytorch-weight-only-quantization"]], "PyTorch framework example": [[547, "pytorch-framework-example"]], "Python-based APIs": [[470, "python-based-apis"]], "Pytorch": [[553, "pytorch"]], "Pytorch Quantization AutoTune": [[516, "pytorch-quantization-autotune"]], "Pytorch Quantization Base API": [[517, "pytorch-quantization-base-api"]], "Pytorch Quantization Config": 
[[518, "pytorch-quantization-config"]], "Quantization": [[488, "quantization"], [511, "quantization"], [546, "quantization"]], "Quantization APIs": [[478, "quantization-apis"]], "Quantization Approaches": [[481, "quantization-approaches"], [546, "quantization-approaches"]], "Quantization Aware Training": [[488, "quantization-aware-training"], [538, "quantization-aware-training"], [546, "quantization-aware-training"], [546, "id1"]], "Quantization Capability": [[549, "quantization-capability"]], "Quantization Fundamentals": [[488, "quantization-fundamentals"], [546, "quantization-fundamentals"], [552, "quantization-fundamentals"]], "Quantization Introduction": [[546, "quantization-introduction"]], "Quantization Scheme": [[481, "quantization-scheme"]], "Quantization Scheme in IPEX": [[488, "quantization-scheme-in-ipex"], [546, "quantization-scheme-in-ipex"]], "Quantization Scheme in MXNet": [[546, "quantization-scheme-in-mxnet"]], "Quantization Scheme in ONNX Runtime": [[546, "quantization-scheme-in-onnx-runtime"]], "Quantization Scheme in PyTorch": [[488, "quantization-scheme-in-pytorch"], [546, "quantization-scheme-in-pytorch"]], "Quantization Scheme in TensorFlow": [[488, "quantization-scheme-in-tensorflow"], [546, "quantization-scheme-in-tensorflow"]], "Quantization Support Matrix": [[546, "quantization-support-matrix"]], "Quantization on Client": [[484, "quantization-on-client"]], "Quantization-Aware Training": [[492, "quantization-aware-training"]], "Query API": [[495, "query-api"]], "Query API Introduction": [[495, "query-api-introduction"]], "Quick Samples": [[531, "quick-samples"]], "RTN": [[477, "rtn"]], "Random": [[554, "random"]], "Recommend VS Code settings.json": [[522, "recommend-vs-code-settings-json"]], "Reference": [[473, "reference"], [477, "reference"], [488, "reference"], [521, "reference"], [522, "reference"], [541, "reference"], [544, "reference"], [546, "reference"], [549, "reference"], [552, "reference"]], "Regularization": [[544, "regularization"]], "Release": [[550, "release"]], "Release Data": [[527, "release-data"]], "Release Notes": [[550, "release-notes"]], "Report a Vulnerability": [[493, "report-a-vulnerability"]], "Retrain-free Pruning API": [[544, "retrain-free-pruning-api"]], "Rules": [[522, "rules"]], "Run sampling iterations of the fp32 graph to calibrate quantizable operators.": [[496, "run-sampling-iterations-of-the-fp32-graph-to-calibrate-quantizable-operators"]], "Saving and Loading": [[477, "saving-and-loading"]], "Scope": [[490, "scope"]], "Sections": [[469, "sections"], [556, "sections"]], "Security": [[526, "security"]], "Security Policy": [[493, "security-policy"]], "Selected Publications/Events": [[494, "selected-publications-events"]], "SigOpt": [[554, "sigopt"]], "SigOpt Platform": [[551, "sigopt-platform"]], "SigOpt Strategy": [[551, "sigopt-strategy"]], "Single Objective": [[542, "single-objective"]], "Smooth Quant": [[480, "smooth-quant"], [552, "smooth-quant"]], "Smooth Quantization": [[481, "smooth-quantization"], [488, "smooth-quantization"]], "SmoothQuant": [[552, "smoothquant"]], "SmoothQuant and Our Enhancement": [[552, "smoothquant-and-our-enhancement"]], "Sparse Model Deployment": [[544, "sparse-model-deployment"]], "Sparsity Decay Types": [[544, "sparsity-decay-types"]], "Specify Quantization Backend and Device": [[546, "specify-quantization-backend-and-device"]], "Specify Quantization Recipes": [[546, "specify-quantization-recipes"]], "Specify Quantization Rules": [[475, "specify-quantization-rules"], [476, 
"specify-quantization-rules"], [477, "specify-quantization-rules"], [479, "specify-quantization-rules"], [546, "specify-quantization-rules"]], "Static Quantization": [[488, "static-quantization"]], "Static Quantization & Quantization Aware Training": [[492, "static-quantization-quantization-aware-training"]], "Static Quantization with IPEX Backend": [[476, "static-quantization-with-ipex-backend"]], "Static Quantization with PT2E Backend": [[476, "static-quantization-with-pt2e-backend"]], "Step-by-Step guidelines": [[491, "step-by-step-guidelines"]], "Strategy": [[512, "strategy"]], "Strategy Design": [[554, "strategy-design"]], "Strings": [[522, "strings"]], "Submodules": [[0, "submodules"], [4, "submodules"], [16, "submodules"], [37, "submodules"], [61, "submodules"], [72, "submodules"], [80, "submodules"], [86, "submodules"], [91, "submodules"], [96, "submodules"], [97, "submodules"], [99, "submodules"], [102, "submodules"], [115, "submodules"], [130, "submodules"], [136, "submodules"], [137, "submodules"], [148, "submodules"], [155, "submodules"], [158, "submodules"], [164, "submodules"], [167, "submodules"], [170, "submodules"], [172, "submodules"], [176, "submodules"], [183, "submodules"], [193, "submodules"], [197, "submodules"], [215, "submodules"], [219, "submodules"], [222, "submodules"], [226, "submodules"], [233, "submodules"], [237, "submodules"], [270, "submodules"], [276, "submodules"], [285, "submodules"], [287, "submodules"], [295, "submodules"], [300, "submodules"], [304, "submodules"], [309, "submodules"], [333, "submodules"], [344, "submodules"], [351, "submodules"], [356, "submodules"], [361, "submodules"], [362, "submodules"], [370, "submodules"], [382, "submodules"], [388, "submodules"], [394, "submodules"], [395, "submodules"], [400, "submodules"], [402, "submodules"], [407, "submodules"], [410, "submodules"], [414, "submodules"], [424, "submodules"], [428, "submodules"], [434, "submodules"], [440, "submodules"], [447, "submodules"], [451, "submodules"], [456, "submodules"], [460, "submodules"]], "Subpackages": [[4, "subpackages"], [72, "subpackages"], [96, "subpackages"], [97, "subpackages"], [99, "subpackages"], [136, "subpackages"], [155, "subpackages"], [170, "subpackages"], [196, "subpackages"], [220, "subpackages"], [226, "subpackages"], [270, "subpackages"], [282, "subpackages"], [290, "subpackages"], [291, "subpackages"], [304, "subpackages"], [344, "subpackages"], [361, "subpackages"], [362, "subpackages"], [394, "subpackages"], [428, "subpackages"], [436, "subpackages"], [460, "subpackages"]], "Summary": [[497, "summary"]], "Support": [[491, "support"]], "Support Matrix": [[481, "support-matrix"], [521, "support-matrix"]], "Supported Algorithms": [[489, "supported-algorithms"]], "Supported Built-in Metric Matrix": [[537, "supported-built-in-metric-matrix"]], "Supported Feature Matrix": [[526, "supported-feature-matrix"], [530, "supported-feature-matrix"], [533, "supported-feature-matrix"], [546, "supported-feature-matrix"]], "Supported Framework Dataloader Matrix": [[523, "supported-framework-dataloader-matrix"]], "Supported Framework Matrix": [[475, "supported-framework-matrix"], [552, "supported-framework-matrix"]], "Supported Framework Model Matrix": [[528, "supported-framework-model-matrix"], [540, "supported-framework-model-matrix"], [541, "supported-framework-model-matrix"], [547, "supported-framework-model-matrix"], [549, "supported-framework-model-matrix"]], "Supported Matrix": [[477, "supported-matrix"], [478, "supported-matrix"], [483, 
"supported-matrix"]], "Supported Parameters": [[472, "supported-parameters"]], "Supported quantized ops": [[528, "supported-quantized-ops"]], "Symmetric & Asymmetric": [[488, "symmetric-asymmetric"]], "System Requirements": [[534, "system-requirements"]], "TEQ": [[477, "teq"]], "TODO Comments": [[522, "todo-comments"]], "TPE": [[554, "tpe"]], "TensorFlow": [[481, "tensorflow"], [537, "tensorflow"], [553, "tensorflow"]], "TensorFlow Examples:": [[526, "tensorflow-examples"]], "TensorFlow Models with TensorFlow 2.16.1": [[555, "tensorflow-models-with-tensorflow-2-16-1"]], "TensorFlow Quantization": [[479, "tensorflow-quantization"]], "Tensorflow": [[548, "tensorflow"]], "Tensorflow Model": [[528, "tensorflow-model"]], "Tensorflow Quantization AutoTune": [[513, "tensorflow-quantization-autotune"]], "Tensorflow Quantization Base API": [[514, "tensorflow-quantization-base-api"]], "Tensorflow Quantization Config": [[515, "tensorflow-quantization-config"]], "Torch": [[478, "torch"]], "Torch Utils": [[500, "torch-utils"]], "Torch-like APIs": [[478, "torch-like-apis"]], "Trademarks": [[535, "trademarks"]], "Training": [[519, "training"]], "Training-aware pruning API": [[544, "training-aware-pruning-api"]], "Transform": [[553, "transform"]], "Transform Support List": [[553, "transform-support-list"]], "Transformers-like API": [[489, "transformers-like-api"]], "Tuning Algorithms": [[554, "tuning-algorithms"]], "Tuning Process": [[554, "tuning-process"]], "Tuning Space": [[554, "tuning-space"]], "Tuning Strategies": [[554, "tuning-strategies"]], "Turn OFF Auto Mixed Precision during Quantization": [[548, "turn-off-auto-mixed-precision-during-quantization"]], "Type Annotations": [[522, "type-annotations"]], "Usage": [[475, "usage"], [477, "usage"], [480, "usage"], [483, "usage"], [552, "usage"], [554, "usage"], [554, "id2"], [554, "id4"], [554, "id6"], [554, "id8"], [554, "id10"], [554, "id12"], [554, "id14"], [554, "id16"], [554, "id18"], [554, "id20"], [554, "id22"]], "Usage For CPU": [[489, "usage-for-cpu"]], "Usage For Intel GPU": [[489, "usage-for-intel-gpu"]], "Usage Sample with IPEX": [[476, "usage-sample-with-ipex"]], "Usage Sample with PT2E": [[476, "usage-sample-with-pt2e"]], "Usage examples for CPU device": [[489, "usage-examples-for-cpu-device"]], "Use Docker Image with torch installed for HPU": [[494, "use-docker-image-with-torch-installed-for-hpu"], [534, "use-docker-image-with-torch-installed-for-hpu"]], "Use Intel\u00ae Neural Compressor DataLoader API": [[523, "use-intel-neural-compressor-dataloader-api"]], "Use Intel\u00ae Neural Compressor Metric API": [[537, "use-intel-neural-compressor-metric-api"]], "Use the New Data Type": [[497, "use-the-new-data-type"]], "User Code Example": [[549, "user-code-example"]], "User code example": [[549, "id1"]], "User-facing APIs": [[532, "user-facing-apis"]], "Using a Fixed alpha": [[480, "using-a-fixed-alpha"]], "Using a fixed alpha": [[552, "using-a-fixed-alpha"]], "Validated Hardware Environment": [[534, "validated-hardware-environment"]], "Validated Knowledge Distillation Examples": [[555, "validated-knowledge-distillation-examples"]], "Validated Models": [[475, "validated-models"], [552, "validated-models"], [555, "validated-models"]], "Validated ONNX QDQ INT8 Models on Multiple Hardware through ONNX Runtime": [[555, "validated-onnx-qdq-int8-models-on-multiple-hardware-through-onnx-runtime"]], "Validated Pruning Examples": [[555, "validated-pruning-examples"]], "Validated Quantization Examples": [[555, "validated-quantization-examples"]], 
"Validated Software Environment": [[534, "validated-software-environment"]], "Version mapping between Intel Neural Compressor to Gaudi Software Stack": [[486, "version-mapping-between-intel-neural-compressor-to-gaudi-software-stack"]], "WOQ Algorithms Tuning": [[549, "woq-algorithms-tuning"]], "Weight Only Quantization": [[488, "weight-only-quantization"]], "Weight Only Quantization (WOQ)": [[549, "weight-only-quantization-woq"]], "Weight-Only Large Language Model Loading (LLMs)": [[494, "weight-only-large-language-model-loading-llms"]], "What\u2019s New": [[494, "what-s-new"]], "With Accuracy Aware Tuning": [[479, "with-accuracy-aware-tuning"]], "Without Accuracy Aware Tuning": [[479, "without-accuracy-aware-tuning"]], "Workflow": [[524, "workflow"]], "Workflows": [[485, "workflows"]], "Working Flow": [[495, "working-flow"], [546, "working-flow"]], "Working with Autotune": [[482, "working-with-autotune"]], "Working with PyTorch Model": [[482, "working-with-pytorch-model"]], "Working with Tensorflow Model": [[482, "working-with-tensorflow-model"]], "neural_compressor": [[226, "module-neural_compressor"]], "neural_compressor.adaptor.mxnet_utils": [[0, "module-neural_compressor.adaptor.mxnet_utils"]], "neural_compressor.adaptor.mxnet_utils.util": [[1, "module-neural_compressor.adaptor.mxnet_utils.util"]], "neural_compressor.adaptor.ox_utils": [[4, "module-neural_compressor.adaptor.ox_utils"]], "neural_compressor.adaptor.ox_utils.calibration": [[2, "module-neural_compressor.adaptor.ox_utils.calibration"]], "neural_compressor.adaptor.ox_utils.calibrator": [[3, "module-neural_compressor.adaptor.ox_utils.calibrator"]], "neural_compressor.adaptor.ox_utils.operators": [[16, "module-neural_compressor.adaptor.ox_utils.operators"]], "neural_compressor.adaptor.ox_utils.operators.activation": [[5, "module-neural_compressor.adaptor.ox_utils.operators.activation"]], "neural_compressor.adaptor.ox_utils.operators.argmax": [[6, "module-neural_compressor.adaptor.ox_utils.operators.argmax"]], "neural_compressor.adaptor.ox_utils.operators.attention": [[7, "module-neural_compressor.adaptor.ox_utils.operators.attention"]], "neural_compressor.adaptor.ox_utils.operators.binary_op": [[8, "module-neural_compressor.adaptor.ox_utils.operators.binary_op"]], "neural_compressor.adaptor.ox_utils.operators.concat": [[9, "module-neural_compressor.adaptor.ox_utils.operators.concat"]], "neural_compressor.adaptor.ox_utils.operators.conv": [[10, "module-neural_compressor.adaptor.ox_utils.operators.conv"]], "neural_compressor.adaptor.ox_utils.operators.direct_q8": [[11, "module-neural_compressor.adaptor.ox_utils.operators.direct_q8"]], "neural_compressor.adaptor.ox_utils.operators.embed_layernorm": [[12, "module-neural_compressor.adaptor.ox_utils.operators.embed_layernorm"]], "neural_compressor.adaptor.ox_utils.operators.gather": [[13, "module-neural_compressor.adaptor.ox_utils.operators.gather"]], "neural_compressor.adaptor.ox_utils.operators.gavgpool": [[14, "module-neural_compressor.adaptor.ox_utils.operators.gavgpool"]], "neural_compressor.adaptor.ox_utils.operators.gemm": [[15, "module-neural_compressor.adaptor.ox_utils.operators.gemm"]], "neural_compressor.adaptor.ox_utils.operators.lstm": [[17, "module-neural_compressor.adaptor.ox_utils.operators.lstm"]], "neural_compressor.adaptor.ox_utils.operators.matmul": [[18, "module-neural_compressor.adaptor.ox_utils.operators.matmul"]], "neural_compressor.adaptor.ox_utils.operators.maxpool": [[19, "module-neural_compressor.adaptor.ox_utils.operators.maxpool"]], 
"neural_compressor.adaptor.ox_utils.operators.norm": [[20, "module-neural_compressor.adaptor.ox_utils.operators.norm"]], "neural_compressor.adaptor.ox_utils.operators.ops": [[21, "module-neural_compressor.adaptor.ox_utils.operators.ops"]], "neural_compressor.adaptor.ox_utils.operators.pad": [[22, "module-neural_compressor.adaptor.ox_utils.operators.pad"]], "neural_compressor.adaptor.ox_utils.operators.pooling": [[23, "module-neural_compressor.adaptor.ox_utils.operators.pooling"]], "neural_compressor.adaptor.ox_utils.operators.reduce": [[24, "module-neural_compressor.adaptor.ox_utils.operators.reduce"]], "neural_compressor.adaptor.ox_utils.operators.resize": [[25, "module-neural_compressor.adaptor.ox_utils.operators.resize"]], "neural_compressor.adaptor.ox_utils.operators.split": [[26, "module-neural_compressor.adaptor.ox_utils.operators.split"]], "neural_compressor.adaptor.ox_utils.operators.unary_op": [[27, "module-neural_compressor.adaptor.ox_utils.operators.unary_op"]], "neural_compressor.adaptor.ox_utils.quantizer": [[28, "module-neural_compressor.adaptor.ox_utils.quantizer"]], "neural_compressor.adaptor.ox_utils.smooth_quant": [[29, "module-neural_compressor.adaptor.ox_utils.smooth_quant"]], "neural_compressor.adaptor.ox_utils.util": [[30, "module-neural_compressor.adaptor.ox_utils.util"]], "neural_compressor.adaptor.ox_utils.weight_only": [[31, "module-neural_compressor.adaptor.ox_utils.weight_only"]], "neural_compressor.adaptor.tensorflow": [[32, "module-neural_compressor.adaptor.tensorflow"]], "neural_compressor.adaptor.tf_utils": [[96, "module-neural_compressor.adaptor.tf_utils"]], "neural_compressor.adaptor.tf_utils.graph_converter": [[33, "module-neural_compressor.adaptor.tf_utils.graph_converter"]], "neural_compressor.adaptor.tf_utils.graph_converter_without_calib": [[34, "module-neural_compressor.adaptor.tf_utils.graph_converter_without_calib"]], "neural_compressor.adaptor.tf_utils.graph_rewriter": [[72, "module-neural_compressor.adaptor.tf_utils.graph_rewriter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16": [[37, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert": [[35, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[36, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic": [[61, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd": [[38, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout": [[39, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu": [[40, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random": [[41, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const": [[42, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const"]], 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction": [[43, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd": [[44, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer": [[45, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape": [[46, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm": [[47, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant": [[48, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add": [[49, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul": [[50, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math": [[51, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn": [[52, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in": [[53, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu": [[54, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm": [[55, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv": [[56, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[57, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose": [[58, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer": [[59, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass": [[60, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node": [[62, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu": [[63, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu"]], 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize": [[64, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes": [[65, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm": [[66, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input": [[67, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes": [[68, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes": [[69, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer": [[70, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base": [[71, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8": [[80, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant": [[73, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value": [[74, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib": [[75, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[76, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize": [[77, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[78, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize": [[79, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer": [[81, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter": [[82, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse": [[83, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert": [[84, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation": [[85, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx": [[86, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph": [[87, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node": [[88, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema": [[89, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils": [[90, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq": [[91, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern": [[92, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq": [[93, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern": [[94, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern"]], "neural_compressor.adaptor.tf_utils.graph_util": [[95, "module-neural_compressor.adaptor.tf_utils.graph_util"]], "neural_compressor.adaptor.tf_utils.quantize_graph": [[97, "module-neural_compressor.adaptor.tf_utils.quantize_graph"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat": [[99, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize": [[98, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config": [[100, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper": [[101, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers": [[102, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer": [[103, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add": [[104, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base": [[105, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn": [[106, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper": [[107, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq": [[115, 
"module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn": [[108, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2": [[109, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv": [[110, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv": [[111, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in": [[112, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul": [[113, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling": [[114, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq": [[116, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base": [[117, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn": [[118, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2": [[119, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv": [[120, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu": [[121, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul": [[122, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling": [[123, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph_common": [[124, "module-neural_compressor.adaptor.tf_utils.quantize_graph_common"]], "neural_compressor.adaptor.tf_utils.smooth_quant_calibration": [[125, "module-neural_compressor.adaptor.tf_utils.smooth_quant_calibration"]], "neural_compressor.adaptor.tf_utils.smooth_quant_scaler": [[126, "module-neural_compressor.adaptor.tf_utils.smooth_quant_scaler"]], "neural_compressor.adaptor.tf_utils.tf2onnx_converter": [[127, "module-neural_compressor.adaptor.tf_utils.tf2onnx_converter"]], "neural_compressor.adaptor.tf_utils.transform_graph": [[130, "module-neural_compressor.adaptor.tf_utils.transform_graph"]], "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction": [[128, "module-neural_compressor.adaptor.tf_utils.transform_graph.bias_correction"]], "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base": [[129, "module-neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base"]], 
"neural_compressor.adaptor.tf_utils.transform_graph.insert_logging": [[131, "module-neural_compressor.adaptor.tf_utils.transform_graph.insert_logging"]], "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat": [[132, "module-neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat"]], "neural_compressor.adaptor.tf_utils.util": [[133, "module-neural_compressor.adaptor.tf_utils.util"]], "neural_compressor.adaptor.torch_utils": [[136, "module-neural_compressor.adaptor.torch_utils"]], "neural_compressor.adaptor.torch_utils.bf16_convert": [[134, "module-neural_compressor.adaptor.torch_utils.bf16_convert"]], "neural_compressor.adaptor.torch_utils.hawq_metric": [[135, "module-neural_compressor.adaptor.torch_utils.hawq_metric"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant": [[137, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle": [[138, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize": [[139, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load": [[140, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils": [[141, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.utils"]], "neural_compressor.adaptor.torch_utils.model_wrapper": [[142, "module-neural_compressor.adaptor.torch_utils.model_wrapper"]], "neural_compressor.adaptor.torch_utils.pattern_detector": [[143, "module-neural_compressor.adaptor.torch_utils.pattern_detector"]], "neural_compressor.adaptor.torch_utils.symbolic_trace": [[144, "module-neural_compressor.adaptor.torch_utils.symbolic_trace"]], "neural_compressor.adaptor.torch_utils.util": [[145, "module-neural_compressor.adaptor.torch_utils.util"]], "neural_compressor.algorithm": [[148, "module-neural_compressor.algorithm"]], "neural_compressor.algorithm.algorithm": [[146, "module-neural_compressor.algorithm.algorithm"]], "neural_compressor.algorithm.fast_bias_correction": [[147, "module-neural_compressor.algorithm.fast_bias_correction"]], "neural_compressor.algorithm.smooth_quant": [[149, "module-neural_compressor.algorithm.smooth_quant"]], "neural_compressor.algorithm.weight_correction": [[150, "module-neural_compressor.algorithm.weight_correction"]], "neural_compressor.benchmark": [[151, "module-neural_compressor.benchmark"]], "neural_compressor.common": [[155, "module-neural_compressor.common"]], "neural_compressor.common.base_config": [[152, "module-neural_compressor.common.base_config"]], "neural_compressor.common.base_tuning": [[153, "module-neural_compressor.common.base_tuning"]], "neural_compressor.common.benchmark": [[154, "module-neural_compressor.common.benchmark"]], "neural_compressor.common.tuning_param": [[156, "module-neural_compressor.common.tuning_param"]], "neural_compressor.common.utils": [[158, "module-neural_compressor.common.utils"]], "neural_compressor.common.utils.constants": [[157, "module-neural_compressor.common.utils.constants"]], "neural_compressor.common.utils.logger": [[159, "module-neural_compressor.common.utils.logger"]], "neural_compressor.common.utils.save_load": [[160, "module-neural_compressor.common.utils.save_load"]], "neural_compressor.common.utils.utility": [[161, "module-neural_compressor.common.utils.utility"]], 
"neural_compressor.compression.callbacks": [[162, "module-neural_compressor.compression.callbacks"]], "neural_compressor.compression.distillation": [[164, "module-neural_compressor.compression.distillation"]], "neural_compressor.compression.distillation.criterions": [[163, "module-neural_compressor.compression.distillation.criterions"]], "neural_compressor.compression.distillation.optimizers": [[165, "module-neural_compressor.compression.distillation.optimizers"]], "neural_compressor.compression.distillation.utility": [[166, "module-neural_compressor.compression.distillation.utility"]], "neural_compressor.compression.hpo": [[167, "module-neural_compressor.compression.hpo"]], "neural_compressor.compression.hpo.sa_optimizer": [[168, "module-neural_compressor.compression.hpo.sa_optimizer"]], "neural_compressor.compression.pruner": [[170, "module-neural_compressor.compression.pruner"]], "neural_compressor.compression.pruner.criteria": [[169, "module-neural_compressor.compression.pruner.criteria"]], "neural_compressor.compression.pruner.model_slim": [[172, "module-neural_compressor.compression.pruner.model_slim"]], "neural_compressor.compression.pruner.model_slim.auto_slim": [[171, "module-neural_compressor.compression.pruner.model_slim.auto_slim"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer": [[173, "module-neural_compressor.compression.pruner.model_slim.pattern_analyzer"]], "neural_compressor.compression.pruner.model_slim.weight_slim": [[174, "module-neural_compressor.compression.pruner.model_slim.weight_slim"]], "neural_compressor.compression.pruner.patterns": [[176, "module-neural_compressor.compression.pruner.patterns"]], "neural_compressor.compression.pruner.patterns.base": [[175, "module-neural_compressor.compression.pruner.patterns.base"]], "neural_compressor.compression.pruner.patterns.mha": [[177, "module-neural_compressor.compression.pruner.patterns.mha"]], "neural_compressor.compression.pruner.patterns.ninm": [[178, "module-neural_compressor.compression.pruner.patterns.ninm"]], "neural_compressor.compression.pruner.patterns.nxm": [[179, "module-neural_compressor.compression.pruner.patterns.nxm"]], "neural_compressor.compression.pruner.pruners": [[183, "module-neural_compressor.compression.pruner.pruners"]], "neural_compressor.compression.pruner.pruners.base": [[180, "module-neural_compressor.compression.pruner.pruners.base"]], "neural_compressor.compression.pruner.pruners.basic": [[181, "module-neural_compressor.compression.pruner.pruners.basic"]], "neural_compressor.compression.pruner.pruners.block_mask": [[182, "module-neural_compressor.compression.pruner.pruners.block_mask"]], "neural_compressor.compression.pruner.pruners.mha": [[184, "module-neural_compressor.compression.pruner.pruners.mha"]], "neural_compressor.compression.pruner.pruners.pattern_lock": [[185, "module-neural_compressor.compression.pruner.pruners.pattern_lock"]], "neural_compressor.compression.pruner.pruners.progressive": [[186, "module-neural_compressor.compression.pruner.pruners.progressive"]], "neural_compressor.compression.pruner.pruners.retrain_free": [[187, "module-neural_compressor.compression.pruner.pruners.retrain_free"]], "neural_compressor.compression.pruner.pruning": [[188, "module-neural_compressor.compression.pruner.pruning"]], "neural_compressor.compression.pruner.regs": [[189, "module-neural_compressor.compression.pruner.regs"]], "neural_compressor.compression.pruner.schedulers": [[190, "module-neural_compressor.compression.pruner.schedulers"]], 
"neural_compressor.compression.pruner.tf_criteria": [[191, "module-neural_compressor.compression.pruner.tf_criteria"]], "neural_compressor.compression.pruner.utils": [[192, "module-neural_compressor.compression.pruner.utils"]], "neural_compressor.compression.pruner.wanda": [[193, "module-neural_compressor.compression.pruner.wanda"]], "neural_compressor.compression.pruner.wanda.utils": [[194, "module-neural_compressor.compression.pruner.wanda.utils"]], "neural_compressor.config": [[195, "module-neural_compressor.config"]], "neural_compressor.contrib": [[196, "module-neural_compressor.contrib"]], "neural_compressor.contrib.strategy": [[197, "module-neural_compressor.contrib.strategy"]], "neural_compressor.contrib.strategy.sigopt": [[198, "module-neural_compressor.contrib.strategy.sigopt"]], "neural_compressor.contrib.strategy.tpe": [[199, "module-neural_compressor.contrib.strategy.tpe"]], "neural_compressor.data": [[220, "module-neural_compressor.data"]], "neural_compressor.data.dataloaders.base_dataloader": [[200, "module-neural_compressor.data.dataloaders.base_dataloader"]], "neural_compressor.data.dataloaders.dataloader": [[201, "module-neural_compressor.data.dataloaders.dataloader"]], "neural_compressor.data.dataloaders.default_dataloader": [[202, "module-neural_compressor.data.dataloaders.default_dataloader"]], "neural_compressor.data.dataloaders.fetcher": [[203, "module-neural_compressor.data.dataloaders.fetcher"]], "neural_compressor.data.dataloaders.mxnet_dataloader": [[204, "module-neural_compressor.data.dataloaders.mxnet_dataloader"]], "neural_compressor.data.dataloaders.onnxrt_dataloader": [[205, "module-neural_compressor.data.dataloaders.onnxrt_dataloader"]], "neural_compressor.data.dataloaders.pytorch_dataloader": [[206, "module-neural_compressor.data.dataloaders.pytorch_dataloader"]], "neural_compressor.data.dataloaders.sampler": [[207, "module-neural_compressor.data.dataloaders.sampler"]], "neural_compressor.data.dataloaders.tensorflow_dataloader": [[208, "module-neural_compressor.data.dataloaders.tensorflow_dataloader"]], "neural_compressor.data.datasets": [[215, "module-neural_compressor.data.datasets"]], "neural_compressor.data.datasets.bert_dataset": [[209, "module-neural_compressor.data.datasets.bert_dataset"]], "neural_compressor.data.datasets.coco_dataset": [[210, "module-neural_compressor.data.datasets.coco_dataset"]], "neural_compressor.data.datasets.dataset": [[211, "module-neural_compressor.data.datasets.dataset"]], "neural_compressor.data.datasets.dummy_dataset": [[212, "module-neural_compressor.data.datasets.dummy_dataset"]], "neural_compressor.data.datasets.dummy_dataset_v2": [[213, "module-neural_compressor.data.datasets.dummy_dataset_v2"]], "neural_compressor.data.datasets.imagenet_dataset": [[214, "module-neural_compressor.data.datasets.imagenet_dataset"]], "neural_compressor.data.datasets.style_transfer_dataset": [[216, "module-neural_compressor.data.datasets.style_transfer_dataset"]], "neural_compressor.data.filters": [[219, "module-neural_compressor.data.filters"]], "neural_compressor.data.filters.coco_filter": [[217, "module-neural_compressor.data.filters.coco_filter"]], "neural_compressor.data.filters.filter": [[218, "module-neural_compressor.data.filters.filter"]], "neural_compressor.data.transforms": [[222, "module-neural_compressor.data.transforms"]], "neural_compressor.data.transforms.imagenet_transform": [[221, "module-neural_compressor.data.transforms.imagenet_transform"]], "neural_compressor.data.transforms.postprocess": [[223, 
"module-neural_compressor.data.transforms.postprocess"]], "neural_compressor.data.transforms.tokenization": [[224, "module-neural_compressor.data.transforms.tokenization"]], "neural_compressor.data.transforms.transform": [[225, "module-neural_compressor.data.transforms.transform"]], "neural_compressor.metric": [[233, "module-neural_compressor.metric"]], "neural_compressor.metric.bleu": [[227, "module-neural_compressor.metric.bleu"]], "neural_compressor.metric.bleu_util": [[228, "module-neural_compressor.metric.bleu_util"]], "neural_compressor.metric.coco_label_map": [[229, "module-neural_compressor.metric.coco_label_map"]], "neural_compressor.metric.coco_tools": [[230, "module-neural_compressor.metric.coco_tools"]], "neural_compressor.metric.evaluate_squad": [[231, "module-neural_compressor.metric.evaluate_squad"]], "neural_compressor.metric.f1": [[232, "module-neural_compressor.metric.f1"]], "neural_compressor.metric.metric": [[234, "module-neural_compressor.metric.metric"]], "neural_compressor.mix_precision": [[235, "module-neural_compressor.mix_precision"]], "neural_compressor.model": [[237, "module-neural_compressor.model"]], "neural_compressor.model.base_model": [[236, "module-neural_compressor.model.base_model"]], "neural_compressor.model.keras_model": [[238, "module-neural_compressor.model.keras_model"]], "neural_compressor.model.model": [[239, "module-neural_compressor.model.model"]], "neural_compressor.model.mxnet_model": [[240, "module-neural_compressor.model.mxnet_model"]], "neural_compressor.model.nets_factory": [[241, "module-neural_compressor.model.nets_factory"]], "neural_compressor.model.onnx_model": [[242, "module-neural_compressor.model.onnx_model"]], "neural_compressor.model.tensorflow_model": [[243, "module-neural_compressor.model.tensorflow_model"]], "neural_compressor.model.torch_model": [[244, "module-neural_compressor.model.torch_model"]], "neural_compressor.objective": [[245, "module-neural_compressor.objective"]], "neural_compressor.profiling": [[246, "module-neural_compressor.profiling"]], "neural_compressor.profiling.parser.factory": [[247, "module-neural_compressor.profiling.parser.factory"]], "neural_compressor.profiling.parser.onnx_parser.factory": [[248, "module-neural_compressor.profiling.parser.onnx_parser.factory"]], "neural_compressor.profiling.parser.onnx_parser.parser": [[249, "module-neural_compressor.profiling.parser.onnx_parser.parser"]], "neural_compressor.profiling.parser.parser": [[250, "module-neural_compressor.profiling.parser.parser"]], "neural_compressor.profiling.parser.result": [[251, "module-neural_compressor.profiling.parser.result"]], "neural_compressor.profiling.parser.tensorflow_parser.factory": [[252, "module-neural_compressor.profiling.parser.tensorflow_parser.factory"]], "neural_compressor.profiling.parser.tensorflow_parser.parser": [[253, "module-neural_compressor.profiling.parser.tensorflow_parser.parser"]], "neural_compressor.profiling.profiler.factory": [[254, "module-neural_compressor.profiling.profiler.factory"]], "neural_compressor.profiling.profiler.onnxrt_profiler.factory": [[255, "module-neural_compressor.profiling.profiler.onnxrt_profiler.factory"]], "neural_compressor.profiling.profiler.onnxrt_profiler.profiler": [[256, "module-neural_compressor.profiling.profiler.onnxrt_profiler.profiler"]], "neural_compressor.profiling.profiler.onnxrt_profiler.utils": [[257, "module-neural_compressor.profiling.profiler.onnxrt_profiler.utils"]], "neural_compressor.profiling.profiler.profiler": [[258, 
"module-neural_compressor.profiling.profiler.profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler.factory": [[259, "module-neural_compressor.profiling.profiler.tensorflow_profiler.factory"]], "neural_compressor.profiling.profiler.tensorflow_profiler.profiler": [[260, "module-neural_compressor.profiling.profiler.tensorflow_profiler.profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler.utils": [[261, "module-neural_compressor.profiling.profiler.tensorflow_profiler.utils"]], "neural_compressor.quantization": [[262, "module-neural_compressor.quantization"]], "neural_compressor.strategy": [[270, "module-neural_compressor.strategy"]], "neural_compressor.strategy.auto": [[263, "module-neural_compressor.strategy.auto"]], "neural_compressor.strategy.auto_mixed_precision": [[264, "module-neural_compressor.strategy.auto_mixed_precision"]], "neural_compressor.strategy.basic": [[265, "module-neural_compressor.strategy.basic"]], "neural_compressor.strategy.bayesian": [[266, "module-neural_compressor.strategy.bayesian"]], "neural_compressor.strategy.conservative": [[267, "module-neural_compressor.strategy.conservative"]], "neural_compressor.strategy.exhaustive": [[268, "module-neural_compressor.strategy.exhaustive"]], "neural_compressor.strategy.hawq_v2": [[269, "module-neural_compressor.strategy.hawq_v2"]], "neural_compressor.strategy.mse": [[271, "module-neural_compressor.strategy.mse"]], "neural_compressor.strategy.mse_v2": [[272, "module-neural_compressor.strategy.mse_v2"]], "neural_compressor.strategy.random": [[273, "module-neural_compressor.strategy.random"]], "neural_compressor.strategy.strategy": [[274, "module-neural_compressor.strategy.strategy"]], "neural_compressor.strategy.utils": [[276, "module-neural_compressor.strategy.utils"]], "neural_compressor.strategy.utils.constant": [[275, "module-neural_compressor.strategy.utils.constant"]], "neural_compressor.strategy.utils.tuning_sampler": [[277, "module-neural_compressor.strategy.utils.tuning_sampler"]], "neural_compressor.strategy.utils.tuning_space": [[278, "module-neural_compressor.strategy.utils.tuning_space"]], "neural_compressor.strategy.utils.tuning_structs": [[279, "module-neural_compressor.strategy.utils.tuning_structs"]], "neural_compressor.strategy.utils.utility": [[280, "module-neural_compressor.strategy.utils.utility"]], "neural_compressor.template.api_doc_example": [[281, "module-neural_compressor.template.api_doc_example"]], "neural_compressor.tensorflow": [[290, "module-neural_compressor.tensorflow"]], "neural_compressor.tensorflow.algorithms": [[282, "module-neural_compressor.tensorflow.algorithms"]], "neural_compressor.tensorflow.algorithms.smoother": [[285, "module-neural_compressor.tensorflow.algorithms.smoother"]], "neural_compressor.tensorflow.algorithms.smoother.calibration": [[283, "module-neural_compressor.tensorflow.algorithms.smoother.calibration"]], "neural_compressor.tensorflow.algorithms.smoother.core": [[284, "module-neural_compressor.tensorflow.algorithms.smoother.core"]], "neural_compressor.tensorflow.algorithms.smoother.scaler": [[286, "module-neural_compressor.tensorflow.algorithms.smoother.scaler"]], "neural_compressor.tensorflow.algorithms.static_quant": [[287, "module-neural_compressor.tensorflow.algorithms.static_quant"]], "neural_compressor.tensorflow.algorithms.static_quant.keras": [[288, "module-neural_compressor.tensorflow.algorithms.static_quant.keras"]], "neural_compressor.tensorflow.algorithms.static_quant.tensorflow": [[289, 
"module-neural_compressor.tensorflow.algorithms.static_quant.tensorflow"]], "neural_compressor.tensorflow.keras": [[291, "module-neural_compressor.tensorflow.keras"]], "neural_compressor.tensorflow.keras.layers": [[295, "module-neural_compressor.tensorflow.keras.layers"]], "neural_compressor.tensorflow.keras.layers.conv2d": [[292, "module-neural_compressor.tensorflow.keras.layers.conv2d"]], "neural_compressor.tensorflow.keras.layers.dense": [[293, "module-neural_compressor.tensorflow.keras.layers.dense"]], "neural_compressor.tensorflow.keras.layers.depthwise_conv2d": [[294, "module-neural_compressor.tensorflow.keras.layers.depthwise_conv2d"]], "neural_compressor.tensorflow.keras.layers.layer_initializer": [[296, "module-neural_compressor.tensorflow.keras.layers.layer_initializer"]], "neural_compressor.tensorflow.keras.layers.pool2d": [[297, "module-neural_compressor.tensorflow.keras.layers.pool2d"]], "neural_compressor.tensorflow.keras.layers.separable_conv2d": [[298, "module-neural_compressor.tensorflow.keras.layers.separable_conv2d"]], "neural_compressor.tensorflow.keras.quantization": [[300, "module-neural_compressor.tensorflow.keras.quantization"]], "neural_compressor.tensorflow.keras.quantization.config": [[299, "module-neural_compressor.tensorflow.keras.quantization.config"]], "neural_compressor.tensorflow.quantization": [[304, "module-neural_compressor.tensorflow.quantization"]], "neural_compressor.tensorflow.quantization.algorithm_entry": [[301, "module-neural_compressor.tensorflow.quantization.algorithm_entry"]], "neural_compressor.tensorflow.quantization.autotune": [[302, "module-neural_compressor.tensorflow.quantization.autotune"]], "neural_compressor.tensorflow.quantization.config": [[303, "module-neural_compressor.tensorflow.quantization.config"]], "neural_compressor.tensorflow.quantization.quantize": [[305, "module-neural_compressor.tensorflow.quantization.quantize"]], "neural_compressor.tensorflow.quantization.utils": [[361, "module-neural_compressor.tensorflow.quantization.utils"]], "neural_compressor.tensorflow.quantization.utils.graph_converter": [[306, "module-neural_compressor.tensorflow.quantization.utils.graph_converter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter": [[344, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16": [[309, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert": [[307, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[308, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic": [[333, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd": [[310, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout": [[311, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu": [[312, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random": [[313, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const": [[314, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction": [[315, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd": [[316, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer": [[317, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape": [[318, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm": [[319, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant": [[320, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add": [[321, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul": [[322, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math": [[323, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn": [[324, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in": [[325, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu": [[326, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm": [[327, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv": [[328, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[329, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose": [[330, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer": [[331, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass": [[332, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node": [[334, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu": [[335, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize": [[336, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes": [[337, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm": [[338, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input": [[339, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes": [[340, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes": [[341, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer": [[342, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base": [[343, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8": [[351, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant": [[345, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value": [[346, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[347, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize": [[348, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[349, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize": [[350, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer": [[352, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter": [[353, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse": [[354, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation": [[355, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq": [[356, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern": [[357, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq": [[358, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern": [[359, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern"]], "neural_compressor.tensorflow.quantization.utils.graph_util": [[360, "module-neural_compressor.tensorflow.quantization.utils.graph_util"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph": [[362, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq": [[370, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn": [[363, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2": [[364, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv": [[365, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv": [[366, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in": [[367, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul": [[368, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul"]], 
"neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling": [[369, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq": [[371, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base": [[372, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn": [[373, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2": [[374, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv": [[375, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu": [[376, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul": [[377, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling": [[378, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph_common": [[379, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph_common"]], "neural_compressor.tensorflow.quantization.utils.transform_graph": [[382, "module-neural_compressor.tensorflow.quantization.utils.transform_graph"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction": [[380, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base": [[381, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging": [[383, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat": [[384, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat"]], "neural_compressor.tensorflow.quantization.utils.utility": [[385, "module-neural_compressor.tensorflow.quantization.utils.utility"]], "neural_compressor.tensorflow.utils": [[388, "module-neural_compressor.tensorflow.utils"]], "neural_compressor.tensorflow.utils.constants": [[386, "module-neural_compressor.tensorflow.utils.constants"]], "neural_compressor.tensorflow.utils.data": [[387, "module-neural_compressor.tensorflow.utils.data"]], "neural_compressor.tensorflow.utils.model": [[389, "module-neural_compressor.tensorflow.utils.model"]], "neural_compressor.tensorflow.utils.model_wrappers": [[390, "module-neural_compressor.tensorflow.utils.model_wrappers"]], "neural_compressor.tensorflow.utils.utility": [[391, 
"module-neural_compressor.tensorflow.utils.utility"]], "neural_compressor.torch": [[436, "module-neural_compressor.torch"]], "neural_compressor.torch.algorithms": [[394, "module-neural_compressor.torch.algorithms"]], "neural_compressor.torch.algorithms.base_algorithm": [[392, "module-neural_compressor.torch.algorithms.base_algorithm"]], "neural_compressor.torch.algorithms.fp8_quant.utils.logger": [[393, "module-neural_compressor.torch.algorithms.fp8_quant.utils.logger"]], "neural_compressor.torch.algorithms.layer_wise": [[395, "module-neural_compressor.torch.algorithms.layer_wise"]], "neural_compressor.torch.algorithms.layer_wise.load": [[396, "module-neural_compressor.torch.algorithms.layer_wise.load"]], "neural_compressor.torch.algorithms.layer_wise.modified_pickle": [[397, "module-neural_compressor.torch.algorithms.layer_wise.modified_pickle"]], "neural_compressor.torch.algorithms.layer_wise.utils": [[398, "module-neural_compressor.torch.algorithms.layer_wise.utils"]], "neural_compressor.torch.algorithms.mixed_precision": [[400, "module-neural_compressor.torch.algorithms.mixed_precision"]], "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert": [[399, "module-neural_compressor.torch.algorithms.mixed_precision.half_precision_convert"]], "neural_compressor.torch.algorithms.mixed_precision.module_wrappers": [[401, "module-neural_compressor.torch.algorithms.mixed_precision.module_wrappers"]], "neural_compressor.torch.algorithms.mx_quant": [[402, "module-neural_compressor.torch.algorithms.mx_quant"]], "neural_compressor.torch.algorithms.mx_quant.mx": [[403, "module-neural_compressor.torch.algorithms.mx_quant.mx"]], "neural_compressor.torch.algorithms.mx_quant.utils": [[404, "module-neural_compressor.torch.algorithms.mx_quant.utils"]], "neural_compressor.torch.algorithms.pt2e_quant": [[407, "module-neural_compressor.torch.algorithms.pt2e_quant"]], "neural_compressor.torch.algorithms.pt2e_quant.core": [[405, "module-neural_compressor.torch.algorithms.pt2e_quant.core"]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter": [[406, "module-neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter"]], "neural_compressor.torch.algorithms.pt2e_quant.save_load": [[408, "module-neural_compressor.torch.algorithms.pt2e_quant.save_load"]], "neural_compressor.torch.algorithms.pt2e_quant.utility": [[409, "module-neural_compressor.torch.algorithms.pt2e_quant.utility"]], "neural_compressor.torch.algorithms.smooth_quant": [[410, "module-neural_compressor.torch.algorithms.smooth_quant"]], "neural_compressor.torch.algorithms.smooth_quant.save_load": [[411, "module-neural_compressor.torch.algorithms.smooth_quant.save_load"]], "neural_compressor.torch.algorithms.smooth_quant.smooth_quant": [[412, "module-neural_compressor.torch.algorithms.smooth_quant.smooth_quant"]], "neural_compressor.torch.algorithms.smooth_quant.utility": [[413, "module-neural_compressor.torch.algorithms.smooth_quant.utility"]], "neural_compressor.torch.algorithms.static_quant": [[414, "module-neural_compressor.torch.algorithms.static_quant"]], "neural_compressor.torch.algorithms.static_quant.save_load": [[415, "module-neural_compressor.torch.algorithms.static_quant.save_load"]], "neural_compressor.torch.algorithms.static_quant.static_quant": [[416, "module-neural_compressor.torch.algorithms.static_quant.static_quant"]], "neural_compressor.torch.algorithms.static_quant.utility": [[417, "module-neural_compressor.torch.algorithms.static_quant.utility"]], 
"neural_compressor.torch.algorithms.weight_only": [[428, "module-neural_compressor.torch.algorithms.weight_only"]], "neural_compressor.torch.algorithms.weight_only.autoround": [[418, "module-neural_compressor.torch.algorithms.weight_only.autoround"]], "neural_compressor.torch.algorithms.weight_only.awq": [[419, "module-neural_compressor.torch.algorithms.weight_only.awq"]], "neural_compressor.torch.algorithms.weight_only.gptq": [[420, "module-neural_compressor.torch.algorithms.weight_only.gptq"]], "neural_compressor.torch.algorithms.weight_only.hqq": [[424, "module-neural_compressor.torch.algorithms.weight_only.hqq"]], "neural_compressor.torch.algorithms.weight_only.hqq.bitpack": [[421, "module-neural_compressor.torch.algorithms.weight_only.hqq.bitpack"]], "neural_compressor.torch.algorithms.weight_only.hqq.config": [[422, "module-neural_compressor.torch.algorithms.weight_only.hqq.config"]], "neural_compressor.torch.algorithms.weight_only.hqq.core": [[423, "module-neural_compressor.torch.algorithms.weight_only.hqq.core"]], "neural_compressor.torch.algorithms.weight_only.hqq.optimizer": [[425, "module-neural_compressor.torch.algorithms.weight_only.hqq.optimizer"]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor": [[426, "module-neural_compressor.torch.algorithms.weight_only.hqq.qtensor"]], "neural_compressor.torch.algorithms.weight_only.hqq.quantizer": [[427, "module-neural_compressor.torch.algorithms.weight_only.hqq.quantizer"]], "neural_compressor.torch.algorithms.weight_only.modules": [[429, "module-neural_compressor.torch.algorithms.weight_only.modules"]], "neural_compressor.torch.algorithms.weight_only.rtn": [[430, "module-neural_compressor.torch.algorithms.weight_only.rtn"]], "neural_compressor.torch.algorithms.weight_only.save_load": [[431, "module-neural_compressor.torch.algorithms.weight_only.save_load"]], "neural_compressor.torch.algorithms.weight_only.teq": [[432, "module-neural_compressor.torch.algorithms.weight_only.teq"]], "neural_compressor.torch.algorithms.weight_only.utility": [[433, "module-neural_compressor.torch.algorithms.weight_only.utility"]], "neural_compressor.torch.export": [[434, "module-neural_compressor.torch.export"]], "neural_compressor.torch.export.pt2e_export": [[435, "module-neural_compressor.torch.export.pt2e_export"]], "neural_compressor.torch.quantization": [[440, "module-neural_compressor.torch.quantization"]], "neural_compressor.torch.quantization.algorithm_entry": [[437, "module-neural_compressor.torch.quantization.algorithm_entry"]], "neural_compressor.torch.quantization.autotune": [[438, "module-neural_compressor.torch.quantization.autotune"]], "neural_compressor.torch.quantization.config": [[439, "module-neural_compressor.torch.quantization.config"]], "neural_compressor.torch.quantization.load_entry": [[441, "module-neural_compressor.torch.quantization.load_entry"]], "neural_compressor.torch.quantization.quantize": [[442, "module-neural_compressor.torch.quantization.quantize"]], "neural_compressor.torch.utils": [[447, "module-neural_compressor.torch.utils"]], "neural_compressor.torch.utils.auto_accelerator": [[443, "module-neural_compressor.torch.utils.auto_accelerator"]], "neural_compressor.torch.utils.bit_packer": [[444, "module-neural_compressor.torch.utils.bit_packer"]], "neural_compressor.torch.utils.constants": [[445, "module-neural_compressor.torch.utils.constants"]], "neural_compressor.torch.utils.environ": [[446, "module-neural_compressor.torch.utils.environ"]], "neural_compressor.torch.utils.utility": [[448, 
"module-neural_compressor.torch.utils.utility"]], "neural_compressor.training": [[449, "module-neural_compressor.training"]], "neural_compressor.transformers.quantization.utils": [[450, "module-neural_compressor.transformers.quantization.utils"]], "neural_compressor.transformers.utils": [[451, "module-neural_compressor.transformers.utils"]], "neural_compressor.transformers.utils.quantization_config": [[452, "module-neural_compressor.transformers.utils.quantization_config"]], "neural_compressor.utils": [[460, "module-neural_compressor.utils"]], "neural_compressor.utils.collect_layer_histogram": [[453, "module-neural_compressor.utils.collect_layer_histogram"]], "neural_compressor.utils.constant": [[454, "module-neural_compressor.utils.constant"]], "neural_compressor.utils.create_obj_from_config": [[455, "module-neural_compressor.utils.create_obj_from_config"]], "neural_compressor.utils.export": [[456, "module-neural_compressor.utils.export"]], "neural_compressor.utils.export.qlinear2qdq": [[457, "module-neural_compressor.utils.export.qlinear2qdq"]], "neural_compressor.utils.export.tf2onnx": [[458, "module-neural_compressor.utils.export.tf2onnx"]], "neural_compressor.utils.export.torch2onnx": [[459, "module-neural_compressor.utils.export.torch2onnx"]], "neural_compressor.utils.kl_divergence": [[461, "module-neural_compressor.utils.kl_divergence"]], "neural_compressor.utils.load_huggingface": [[462, "module-neural_compressor.utils.load_huggingface"]], "neural_compressor.utils.logger": [[463, "module-neural_compressor.utils.logger"]], "neural_compressor.utils.options": [[464, "module-neural_compressor.utils.options"]], "neural_compressor.utils.pytorch": [[465, "module-neural_compressor.utils.pytorch"]], "neural_compressor.utils.utility": [[466, "module-neural_compressor.utils.utility"]], "neural_compressor.utils.weights_details": [[467, "module-neural_compressor.utils.weights_details"]], "neural_compressor.version": [[468, "module-neural_compressor.version"]], "}": [[145, "id3"]]}, "docnames": ["autoapi/neural_compressor/adaptor/mxnet_utils/index", "autoapi/neural_compressor/adaptor/mxnet_utils/util/index", "autoapi/neural_compressor/adaptor/ox_utils/calibration/index", "autoapi/neural_compressor/adaptor/ox_utils/calibrator/index", "autoapi/neural_compressor/adaptor/ox_utils/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index", 
"autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/split/index", "autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index", "autoapi/neural_compressor/adaptor/ox_utils/quantizer/index", "autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index", "autoapi/neural_compressor/adaptor/ox_utils/util/index", "autoapi/neural_compressor/adaptor/ox_utils/weight_only/index", "autoapi/neural_compressor/adaptor/tensorflow/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index", 
"autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index", "autoapi/neural_compressor/adaptor/tf_utils/graph_util/index", "autoapi/neural_compressor/adaptor/tf_utils/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index", 
"autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index", "autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index", "autoapi/neural_compressor/adaptor/tf_utils/util/index", "autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index", "autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index", "autoapi/neural_compressor/adaptor/torch_utils/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index", "autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index", "autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index", "autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index", "autoapi/neural_compressor/adaptor/torch_utils/util/index", "autoapi/neural_compressor/algorithm/algorithm/index", "autoapi/neural_compressor/algorithm/fast_bias_correction/index", 
"autoapi/neural_compressor/algorithm/index", "autoapi/neural_compressor/algorithm/smooth_quant/index", "autoapi/neural_compressor/algorithm/weight_correction/index", "autoapi/neural_compressor/benchmark/index", "autoapi/neural_compressor/common/base_config/index", "autoapi/neural_compressor/common/base_tuning/index", "autoapi/neural_compressor/common/benchmark/index", "autoapi/neural_compressor/common/index", "autoapi/neural_compressor/common/tuning_param/index", "autoapi/neural_compressor/common/utils/constants/index", "autoapi/neural_compressor/common/utils/index", "autoapi/neural_compressor/common/utils/logger/index", "autoapi/neural_compressor/common/utils/save_load/index", "autoapi/neural_compressor/common/utils/utility/index", "autoapi/neural_compressor/compression/callbacks/index", "autoapi/neural_compressor/compression/distillation/criterions/index", "autoapi/neural_compressor/compression/distillation/index", "autoapi/neural_compressor/compression/distillation/optimizers/index", "autoapi/neural_compressor/compression/distillation/utility/index", "autoapi/neural_compressor/compression/hpo/index", "autoapi/neural_compressor/compression/hpo/sa_optimizer/index", "autoapi/neural_compressor/compression/pruner/criteria/index", "autoapi/neural_compressor/compression/pruner/index", "autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index", "autoapi/neural_compressor/compression/pruner/model_slim/index", "autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index", "autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index", "autoapi/neural_compressor/compression/pruner/patterns/base/index", "autoapi/neural_compressor/compression/pruner/patterns/index", "autoapi/neural_compressor/compression/pruner/patterns/mha/index", "autoapi/neural_compressor/compression/pruner/patterns/ninm/index", "autoapi/neural_compressor/compression/pruner/patterns/nxm/index", "autoapi/neural_compressor/compression/pruner/pruners/base/index", "autoapi/neural_compressor/compression/pruner/pruners/basic/index", "autoapi/neural_compressor/compression/pruner/pruners/block_mask/index", "autoapi/neural_compressor/compression/pruner/pruners/index", "autoapi/neural_compressor/compression/pruner/pruners/mha/index", "autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index", "autoapi/neural_compressor/compression/pruner/pruners/progressive/index", "autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index", "autoapi/neural_compressor/compression/pruner/pruning/index", "autoapi/neural_compressor/compression/pruner/regs/index", "autoapi/neural_compressor/compression/pruner/schedulers/index", "autoapi/neural_compressor/compression/pruner/tf_criteria/index", "autoapi/neural_compressor/compression/pruner/utils/index", "autoapi/neural_compressor/compression/pruner/wanda/index", "autoapi/neural_compressor/compression/pruner/wanda/utils/index", "autoapi/neural_compressor/config/index", "autoapi/neural_compressor/contrib/index", "autoapi/neural_compressor/contrib/strategy/index", "autoapi/neural_compressor/contrib/strategy/sigopt/index", "autoapi/neural_compressor/contrib/strategy/tpe/index", "autoapi/neural_compressor/data/dataloaders/base_dataloader/index", "autoapi/neural_compressor/data/dataloaders/dataloader/index", "autoapi/neural_compressor/data/dataloaders/default_dataloader/index", "autoapi/neural_compressor/data/dataloaders/fetcher/index", "autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index", 
"autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index", "autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index", "autoapi/neural_compressor/data/dataloaders/sampler/index", "autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index", "autoapi/neural_compressor/data/datasets/bert_dataset/index", "autoapi/neural_compressor/data/datasets/coco_dataset/index", "autoapi/neural_compressor/data/datasets/dataset/index", "autoapi/neural_compressor/data/datasets/dummy_dataset/index", "autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index", "autoapi/neural_compressor/data/datasets/imagenet_dataset/index", "autoapi/neural_compressor/data/datasets/index", "autoapi/neural_compressor/data/datasets/style_transfer_dataset/index", "autoapi/neural_compressor/data/filters/coco_filter/index", "autoapi/neural_compressor/data/filters/filter/index", "autoapi/neural_compressor/data/filters/index", "autoapi/neural_compressor/data/index", "autoapi/neural_compressor/data/transforms/imagenet_transform/index", "autoapi/neural_compressor/data/transforms/index", "autoapi/neural_compressor/data/transforms/postprocess/index", "autoapi/neural_compressor/data/transforms/tokenization/index", "autoapi/neural_compressor/data/transforms/transform/index", "autoapi/neural_compressor/index", "autoapi/neural_compressor/metric/bleu/index", "autoapi/neural_compressor/metric/bleu_util/index", "autoapi/neural_compressor/metric/coco_label_map/index", "autoapi/neural_compressor/metric/coco_tools/index", "autoapi/neural_compressor/metric/evaluate_squad/index", "autoapi/neural_compressor/metric/f1/index", "autoapi/neural_compressor/metric/index", "autoapi/neural_compressor/metric/metric/index", "autoapi/neural_compressor/mix_precision/index", "autoapi/neural_compressor/model/base_model/index", "autoapi/neural_compressor/model/index", "autoapi/neural_compressor/model/keras_model/index", "autoapi/neural_compressor/model/model/index", "autoapi/neural_compressor/model/mxnet_model/index", "autoapi/neural_compressor/model/nets_factory/index", "autoapi/neural_compressor/model/onnx_model/index", "autoapi/neural_compressor/model/tensorflow_model/index", "autoapi/neural_compressor/model/torch_model/index", "autoapi/neural_compressor/objective/index", "autoapi/neural_compressor/profiling/index", "autoapi/neural_compressor/profiling/parser/factory/index", "autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index", "autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index", "autoapi/neural_compressor/profiling/parser/parser/index", "autoapi/neural_compressor/profiling/parser/result/index", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index", "autoapi/neural_compressor/profiling/profiler/factory/index", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index", "autoapi/neural_compressor/profiling/profiler/profiler/index", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index", "autoapi/neural_compressor/quantization/index", "autoapi/neural_compressor/strategy/auto/index", 
"autoapi/neural_compressor/strategy/auto_mixed_precision/index", "autoapi/neural_compressor/strategy/basic/index", "autoapi/neural_compressor/strategy/bayesian/index", "autoapi/neural_compressor/strategy/conservative/index", "autoapi/neural_compressor/strategy/exhaustive/index", "autoapi/neural_compressor/strategy/hawq_v2/index", "autoapi/neural_compressor/strategy/index", "autoapi/neural_compressor/strategy/mse/index", "autoapi/neural_compressor/strategy/mse_v2/index", "autoapi/neural_compressor/strategy/random/index", "autoapi/neural_compressor/strategy/strategy/index", "autoapi/neural_compressor/strategy/utils/constant/index", "autoapi/neural_compressor/strategy/utils/index", "autoapi/neural_compressor/strategy/utils/tuning_sampler/index", "autoapi/neural_compressor/strategy/utils/tuning_space/index", "autoapi/neural_compressor/strategy/utils/tuning_structs/index", "autoapi/neural_compressor/strategy/utils/utility/index", "autoapi/neural_compressor/template/api_doc_example/index", "autoapi/neural_compressor/tensorflow/algorithms/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/index", "autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/index", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index", "autoapi/neural_compressor/tensorflow/index", "autoapi/neural_compressor/tensorflow/keras/index", "autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index", "autoapi/neural_compressor/tensorflow/keras/layers/dense/index", "autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index", "autoapi/neural_compressor/tensorflow/keras/layers/index", "autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index", "autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index", "autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index", "autoapi/neural_compressor/tensorflow/keras/quantization/config/index", "autoapi/neural_compressor/tensorflow/keras/quantization/index", "autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index", "autoapi/neural_compressor/tensorflow/quantization/autotune/index", "autoapi/neural_compressor/tensorflow/quantization/config/index", "autoapi/neural_compressor/tensorflow/quantization/index", "autoapi/neural_compressor/tensorflow/quantization/quantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index", 
"autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index", 
"autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index", "autoapi/neural_compressor/tensorflow/quantization/utils/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index", 
"autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index", "autoapi/neural_compressor/tensorflow/quantization/utils/utility/index", "autoapi/neural_compressor/tensorflow/utils/constants/index", "autoapi/neural_compressor/tensorflow/utils/data/index", "autoapi/neural_compressor/tensorflow/utils/index", "autoapi/neural_compressor/tensorflow/utils/model/index", "autoapi/neural_compressor/tensorflow/utils/model_wrappers/index", "autoapi/neural_compressor/tensorflow/utils/utility/index", "autoapi/neural_compressor/torch/algorithms/base_algorithm/index", "autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index", "autoapi/neural_compressor/torch/algorithms/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/load/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index", "autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index", "autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index", "autoapi/neural_compressor/torch/algorithms/mixed_precision/index", "autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index", "autoapi/neural_compressor/torch/algorithms/mx_quant/index", "autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index", "autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index", "autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index", "autoapi/neural_compressor/torch/algorithms/static_quant/index", "autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index", "autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index", "autoapi/neural_compressor/torch/algorithms/static_quant/utility/index", "autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index", "autoapi/neural_compressor/torch/algorithms/weight_only/awq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index", "autoapi/neural_compressor/torch/algorithms/weight_only/index", "autoapi/neural_compressor/torch/algorithms/weight_only/modules/index", "autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index", "autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index", "autoapi/neural_compressor/torch/algorithms/weight_only/teq/index", "autoapi/neural_compressor/torch/algorithms/weight_only/utility/index", 
"autoapi/neural_compressor/torch/export/index", "autoapi/neural_compressor/torch/export/pt2e_export/index", "autoapi/neural_compressor/torch/index", "autoapi/neural_compressor/torch/quantization/algorithm_entry/index", "autoapi/neural_compressor/torch/quantization/autotune/index", "autoapi/neural_compressor/torch/quantization/config/index", "autoapi/neural_compressor/torch/quantization/index", "autoapi/neural_compressor/torch/quantization/load_entry/index", "autoapi/neural_compressor/torch/quantization/quantize/index", "autoapi/neural_compressor/torch/utils/auto_accelerator/index", "autoapi/neural_compressor/torch/utils/bit_packer/index", "autoapi/neural_compressor/torch/utils/constants/index", "autoapi/neural_compressor/torch/utils/environ/index", "autoapi/neural_compressor/torch/utils/index", "autoapi/neural_compressor/torch/utils/utility/index", "autoapi/neural_compressor/training/index", "autoapi/neural_compressor/transformers/quantization/utils/index", "autoapi/neural_compressor/transformers/utils/index", "autoapi/neural_compressor/transformers/utils/quantization_config/index", "autoapi/neural_compressor/utils/collect_layer_histogram/index", "autoapi/neural_compressor/utils/constant/index", "autoapi/neural_compressor/utils/create_obj_from_config/index", "autoapi/neural_compressor/utils/export/index", "autoapi/neural_compressor/utils/export/qlinear2qdq/index", "autoapi/neural_compressor/utils/export/tf2onnx/index", "autoapi/neural_compressor/utils/export/torch2onnx/index", "autoapi/neural_compressor/utils/index", "autoapi/neural_compressor/utils/kl_divergence/index", "autoapi/neural_compressor/utils/load_huggingface/index", "autoapi/neural_compressor/utils/logger/index", "autoapi/neural_compressor/utils/options/index", "autoapi/neural_compressor/utils/pytorch/index", "autoapi/neural_compressor/utils/utility/index", "autoapi/neural_compressor/utils/weights_details/index", "autoapi/neural_compressor/version/index", "docs/build_docs/source/index", "docs/source/2x_user_guide", "docs/source/3x/PT_DynamicQuant", "docs/source/3x/PT_FP8Quant", "docs/source/3x/PT_MXQuant", "docs/source/3x/PT_MixedPrecision", "docs/source/3x/PT_SmoothQuant", "docs/source/3x/PT_StaticQuant", "docs/source/3x/PT_WeightOnlyQuant", "docs/source/3x/PyTorch", "docs/source/3x/TF_Quant", "docs/source/3x/TF_SQ", "docs/source/3x/TensorFlow", "docs/source/3x/autotune", "docs/source/3x/benchmark", "docs/source/3x/client_quant", "docs/source/3x/design", "docs/source/3x/gaudi_version_map", "docs/source/3x/llm_recipes", "docs/source/3x/quantization", "docs/source/3x/transformers_like_api", "docs/source/CODE_OF_CONDUCT", "docs/source/CONTRIBUTING", "docs/source/FX", "docs/source/SECURITY", "docs/source/Welcome", "docs/source/adaptor", "docs/source/add_new_adaptor", "docs/source/add_new_data_type", "docs/source/api-doc/adaptor", "docs/source/api-doc/adaptor/onnxrt", "docs/source/api-doc/adaptor/torch_utils", "docs/source/api-doc/api_2", "docs/source/api-doc/api_3", "docs/source/api-doc/api_doc_example", "docs/source/api-doc/apis", "docs/source/api-doc/benchmark", "docs/source/api-doc/compression", "docs/source/api-doc/config", "docs/source/api-doc/mix_precision", "docs/source/api-doc/model", "docs/source/api-doc/objective", "docs/source/api-doc/quantization", "docs/source/api-doc/strategy", "docs/source/api-doc/tf_quantization_autotune", "docs/source/api-doc/tf_quantization_common", "docs/source/api-doc/tf_quantization_config", "docs/source/api-doc/torch_quantization_autotune", "docs/source/api-doc/torch_quantization_common", 
"docs/source/api-doc/torch_quantization_config", "docs/source/api-doc/training", "docs/source/benchmark", "docs/source/calibration", "docs/source/coding_style", "docs/source/dataloader", "docs/source/design", "docs/source/distillation_quantization", "docs/source/distributed", "docs/source/examples_readme", "docs/source/export", "docs/source/faq", "docs/source/framework_yaml", "docs/source/get_started", "docs/source/incompatible_changes", "docs/source/infrastructure", "docs/source/installation_guide", "docs/source/legal_information", "docs/source/llm_recipes", "docs/source/metric", "docs/source/migration", "docs/source/mixed_precision", "docs/source/model", "docs/source/mx_quantization", "docs/source/objective", "docs/source/orchestration", "docs/source/pruning", "docs/source/publication_list", "docs/source/quantization", "docs/source/quantization_layer_wise", "docs/source/quantization_mixed_precision", "docs/source/quantization_weight_only", "docs/source/releases_info", "docs/source/sigopt_strategy", "docs/source/smooth_quant", "docs/source/transform", "docs/source/tuning_strategies", "docs/source/validated_model_list", "index"], "envversion": {"sphinx": 61, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2}, "filenames": ["autoapi/neural_compressor/adaptor/mxnet_utils/index.rst", "autoapi/neural_compressor/adaptor/mxnet_utils/util/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/calibration/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/calibrator/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/activation/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/argmax/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/attention/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/binary_op/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/concat/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/conv/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/direct_q8/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/embed_layernorm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/gather/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/gavgpool/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/gemm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/lstm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/matmul/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/maxpool/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/norm/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/ops/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/pad/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/pooling/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/reduce/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/resize/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/split/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/operators/unary_op/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/quantizer/index.rst", 
"autoapi/neural_compressor/adaptor/ox_utils/smooth_quant/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/util/index.rst", "autoapi/neural_compressor/adaptor/ox_utils/weight_only/index.rst", "autoapi/neural_compressor/adaptor/tensorflow/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_converter_without_calib/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_nan_to_random/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_placeholder_to_const/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dilated_contraction/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/expanddims_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fetch_weight_from_reshape/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_layer_norm/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/move_squeeze_after_relu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/rename_batch_norm/index.rst", 
"autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_equivalent_nodes/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_hostconst_converter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_graph/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_node/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/onnx_schema/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/onnx/tf2onnx_utils/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/insert_qdq_pattern/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/merge_duplicated_qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_rewriter/qdq/share_qdq_y_pattern/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/graph_util/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/fake_quantize/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_config/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_helper/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/optimize_layer/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_add/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_base/index.rst", 
"autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_layers/quantize_layer_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qat/quantize_wrapper/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_concatv2/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_deconv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_pooling/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/optimize_qdq/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_bn/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/quantize_graph_common/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_calibration/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/smooth_quant_scaler/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/tf2onnx_converter/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat/index.rst", "autoapi/neural_compressor/adaptor/tf_utils/util/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/bf16_convert/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/hawq_metric/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/quantize/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/torch_load/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/layer_wise_quant/utils/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/model_wrapper/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/pattern_detector/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/symbolic_trace/index.rst", "autoapi/neural_compressor/adaptor/torch_utils/util/index.rst", "autoapi/neural_compressor/algorithm/algorithm/index.rst", "autoapi/neural_compressor/algorithm/fast_bias_correction/index.rst", "autoapi/neural_compressor/algorithm/index.rst", 
"autoapi/neural_compressor/algorithm/smooth_quant/index.rst", "autoapi/neural_compressor/algorithm/weight_correction/index.rst", "autoapi/neural_compressor/benchmark/index.rst", "autoapi/neural_compressor/common/base_config/index.rst", "autoapi/neural_compressor/common/base_tuning/index.rst", "autoapi/neural_compressor/common/benchmark/index.rst", "autoapi/neural_compressor/common/index.rst", "autoapi/neural_compressor/common/tuning_param/index.rst", "autoapi/neural_compressor/common/utils/constants/index.rst", "autoapi/neural_compressor/common/utils/index.rst", "autoapi/neural_compressor/common/utils/logger/index.rst", "autoapi/neural_compressor/common/utils/save_load/index.rst", "autoapi/neural_compressor/common/utils/utility/index.rst", "autoapi/neural_compressor/compression/callbacks/index.rst", "autoapi/neural_compressor/compression/distillation/criterions/index.rst", "autoapi/neural_compressor/compression/distillation/index.rst", "autoapi/neural_compressor/compression/distillation/optimizers/index.rst", "autoapi/neural_compressor/compression/distillation/utility/index.rst", "autoapi/neural_compressor/compression/hpo/index.rst", "autoapi/neural_compressor/compression/hpo/sa_optimizer/index.rst", "autoapi/neural_compressor/compression/pruner/criteria/index.rst", "autoapi/neural_compressor/compression/pruner/index.rst", "autoapi/neural_compressor/compression/pruner/model_slim/auto_slim/index.rst", "autoapi/neural_compressor/compression/pruner/model_slim/index.rst", "autoapi/neural_compressor/compression/pruner/model_slim/pattern_analyzer/index.rst", "autoapi/neural_compressor/compression/pruner/model_slim/weight_slim/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/base/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/mha/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/ninm/index.rst", "autoapi/neural_compressor/compression/pruner/patterns/nxm/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/base/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/basic/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/block_mask/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/mha/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/pattern_lock/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/progressive/index.rst", "autoapi/neural_compressor/compression/pruner/pruners/retrain_free/index.rst", "autoapi/neural_compressor/compression/pruner/pruning/index.rst", "autoapi/neural_compressor/compression/pruner/regs/index.rst", "autoapi/neural_compressor/compression/pruner/schedulers/index.rst", "autoapi/neural_compressor/compression/pruner/tf_criteria/index.rst", "autoapi/neural_compressor/compression/pruner/utils/index.rst", "autoapi/neural_compressor/compression/pruner/wanda/index.rst", "autoapi/neural_compressor/compression/pruner/wanda/utils/index.rst", "autoapi/neural_compressor/config/index.rst", "autoapi/neural_compressor/contrib/index.rst", "autoapi/neural_compressor/contrib/strategy/index.rst", "autoapi/neural_compressor/contrib/strategy/sigopt/index.rst", "autoapi/neural_compressor/contrib/strategy/tpe/index.rst", "autoapi/neural_compressor/data/dataloaders/base_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/dataloader/index.rst", 
"autoapi/neural_compressor/data/dataloaders/default_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/fetcher/index.rst", "autoapi/neural_compressor/data/dataloaders/mxnet_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/onnxrt_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/pytorch_dataloader/index.rst", "autoapi/neural_compressor/data/dataloaders/sampler/index.rst", "autoapi/neural_compressor/data/dataloaders/tensorflow_dataloader/index.rst", "autoapi/neural_compressor/data/datasets/bert_dataset/index.rst", "autoapi/neural_compressor/data/datasets/coco_dataset/index.rst", "autoapi/neural_compressor/data/datasets/dataset/index.rst", "autoapi/neural_compressor/data/datasets/dummy_dataset/index.rst", "autoapi/neural_compressor/data/datasets/dummy_dataset_v2/index.rst", "autoapi/neural_compressor/data/datasets/imagenet_dataset/index.rst", "autoapi/neural_compressor/data/datasets/index.rst", "autoapi/neural_compressor/data/datasets/style_transfer_dataset/index.rst", "autoapi/neural_compressor/data/filters/coco_filter/index.rst", "autoapi/neural_compressor/data/filters/filter/index.rst", "autoapi/neural_compressor/data/filters/index.rst", "autoapi/neural_compressor/data/index.rst", "autoapi/neural_compressor/data/transforms/imagenet_transform/index.rst", "autoapi/neural_compressor/data/transforms/index.rst", "autoapi/neural_compressor/data/transforms/postprocess/index.rst", "autoapi/neural_compressor/data/transforms/tokenization/index.rst", "autoapi/neural_compressor/data/transforms/transform/index.rst", "autoapi/neural_compressor/index.rst", "autoapi/neural_compressor/metric/bleu/index.rst", "autoapi/neural_compressor/metric/bleu_util/index.rst", "autoapi/neural_compressor/metric/coco_label_map/index.rst", "autoapi/neural_compressor/metric/coco_tools/index.rst", "autoapi/neural_compressor/metric/evaluate_squad/index.rst", "autoapi/neural_compressor/metric/f1/index.rst", "autoapi/neural_compressor/metric/index.rst", "autoapi/neural_compressor/metric/metric/index.rst", "autoapi/neural_compressor/mix_precision/index.rst", "autoapi/neural_compressor/model/base_model/index.rst", "autoapi/neural_compressor/model/index.rst", "autoapi/neural_compressor/model/keras_model/index.rst", "autoapi/neural_compressor/model/model/index.rst", "autoapi/neural_compressor/model/mxnet_model/index.rst", "autoapi/neural_compressor/model/nets_factory/index.rst", "autoapi/neural_compressor/model/onnx_model/index.rst", "autoapi/neural_compressor/model/tensorflow_model/index.rst", "autoapi/neural_compressor/model/torch_model/index.rst", "autoapi/neural_compressor/objective/index.rst", "autoapi/neural_compressor/profiling/index.rst", "autoapi/neural_compressor/profiling/parser/factory/index.rst", "autoapi/neural_compressor/profiling/parser/onnx_parser/factory/index.rst", "autoapi/neural_compressor/profiling/parser/onnx_parser/parser/index.rst", "autoapi/neural_compressor/profiling/parser/parser/index.rst", "autoapi/neural_compressor/profiling/parser/result/index.rst", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/factory/index.rst", "autoapi/neural_compressor/profiling/parser/tensorflow_parser/parser/index.rst", "autoapi/neural_compressor/profiling/profiler/factory/index.rst", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/factory/index.rst", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/profiler/index.rst", "autoapi/neural_compressor/profiling/profiler/onnxrt_profiler/utils/index.rst", 
"autoapi/neural_compressor/profiling/profiler/profiler/index.rst", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/factory/index.rst", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/profiler/index.rst", "autoapi/neural_compressor/profiling/profiler/tensorflow_profiler/utils/index.rst", "autoapi/neural_compressor/quantization/index.rst", "autoapi/neural_compressor/strategy/auto/index.rst", "autoapi/neural_compressor/strategy/auto_mixed_precision/index.rst", "autoapi/neural_compressor/strategy/basic/index.rst", "autoapi/neural_compressor/strategy/bayesian/index.rst", "autoapi/neural_compressor/strategy/conservative/index.rst", "autoapi/neural_compressor/strategy/exhaustive/index.rst", "autoapi/neural_compressor/strategy/hawq_v2/index.rst", "autoapi/neural_compressor/strategy/index.rst", "autoapi/neural_compressor/strategy/mse/index.rst", "autoapi/neural_compressor/strategy/mse_v2/index.rst", "autoapi/neural_compressor/strategy/random/index.rst", "autoapi/neural_compressor/strategy/strategy/index.rst", "autoapi/neural_compressor/strategy/utils/constant/index.rst", "autoapi/neural_compressor/strategy/utils/index.rst", "autoapi/neural_compressor/strategy/utils/tuning_sampler/index.rst", "autoapi/neural_compressor/strategy/utils/tuning_space/index.rst", "autoapi/neural_compressor/strategy/utils/tuning_structs/index.rst", "autoapi/neural_compressor/strategy/utils/utility/index.rst", "autoapi/neural_compressor/template/api_doc_example/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/calibration/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/core/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/smoother/scaler/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/keras/index.rst", "autoapi/neural_compressor/tensorflow/algorithms/static_quant/tensorflow/index.rst", "autoapi/neural_compressor/tensorflow/index.rst", "autoapi/neural_compressor/tensorflow/keras/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/conv2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/dense/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/depthwise_conv2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/layer_initializer/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/pool2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/layers/separable_conv2d/index.rst", "autoapi/neural_compressor/tensorflow/keras/quantization/config/index.rst", "autoapi/neural_compressor/tensorflow/keras/quantization/index.rst", "autoapi/neural_compressor/tensorflow/quantization/algorithm_entry/index.rst", "autoapi/neural_compressor/tensorflow/quantization/autotune/index.rst", "autoapi/neural_compressor/tensorflow/quantization/config/index.rst", "autoapi/neural_compressor/tensorflow/quantization/index.rst", "autoapi/neural_compressor/tensorflow/quantization/quantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_converter/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/bf16_convert/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/dequantize_cast_optimizer/index.rst", 
"autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/bf16/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_add_to_biasadd/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_layout/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_leakyrelu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_nan_to_random/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/convert_placeholder_to_const/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dilated_contraction/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/dummy_biasadd/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/expanddims_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fetch_weight_from_reshape/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_batch_norm/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fold_constant/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_biasadd_add/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_column_wise_mul/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_conv_with_math/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_bn/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_decomposed_in/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_gelu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_layer_norm/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_pad_with_fp32_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/fuse_reshape_transpose/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/graph_cse_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/grappler_pass/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/insert_print_node/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/move_squeeze_after_relu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/pre_optimize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/remove_training_nodes/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/rename_batch_norm/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/split_shared_input/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_equivalent_nodes/index.rst", 
"autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/strip_unused_nodes/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/generic/switch_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/graph_base/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_fake_quant/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/freeze_value/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_redundant_dequantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_conv_requantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_redundant_dequantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/fuse_matmul_requantize/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/meta_op_optimizer/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_hostconst_converter/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/post_quantized_op_cse/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/int8/scale_propagation/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/insert_qdq_pattern/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/merge_duplicated_qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_rewriter/qdq/share_qdq_y_pattern/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/graph_util/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_bn/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_concatv2/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_deconv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_in/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_matmul/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/fuse_qdq_pooling/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/qdq/optimize_qdq/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_base/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_bn/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_concatv2/index.rst", 
"autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_conv/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_for_intel_cpu/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_matmul/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph/quantize_graph_pooling/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/quantize_graph_common/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/bias_correction/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/graph_transform_base/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/insert_logging/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/transform_graph/rerange_quantized_concat/index.rst", "autoapi/neural_compressor/tensorflow/quantization/utils/utility/index.rst", "autoapi/neural_compressor/tensorflow/utils/constants/index.rst", "autoapi/neural_compressor/tensorflow/utils/data/index.rst", "autoapi/neural_compressor/tensorflow/utils/index.rst", "autoapi/neural_compressor/tensorflow/utils/model/index.rst", "autoapi/neural_compressor/tensorflow/utils/model_wrappers/index.rst", "autoapi/neural_compressor/tensorflow/utils/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/base_algorithm/index.rst", "autoapi/neural_compressor/torch/algorithms/fp8_quant/utils/logger/index.rst", "autoapi/neural_compressor/torch/algorithms/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/load/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/modified_pickle/index.rst", "autoapi/neural_compressor/torch/algorithms/layer_wise/utils/index.rst", "autoapi/neural_compressor/torch/algorithms/mixed_precision/half_precision_convert/index.rst", "autoapi/neural_compressor/torch/algorithms/mixed_precision/index.rst", "autoapi/neural_compressor/torch/algorithms/mixed_precision/module_wrappers/index.rst", "autoapi/neural_compressor/torch/algorithms/mx_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/mx_quant/mx/index.rst", "autoapi/neural_compressor/torch/algorithms/mx_quant/utils/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/core/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/half_precision_rewriter/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/pt2e_quant/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/smooth_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/smooth_quant/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/static_quant/index.rst", "autoapi/neural_compressor/torch/algorithms/static_quant/utility/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/autoround/index.rst", 
"autoapi/neural_compressor/torch/algorithms/weight_only/awq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/gptq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/bitpack/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/config/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/core/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/optimizer/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/qtensor/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/hqq/quantizer/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/modules/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/rtn/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/save_load/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/teq/index.rst", "autoapi/neural_compressor/torch/algorithms/weight_only/utility/index.rst", "autoapi/neural_compressor/torch/export/index.rst", "autoapi/neural_compressor/torch/export/pt2e_export/index.rst", "autoapi/neural_compressor/torch/index.rst", "autoapi/neural_compressor/torch/quantization/algorithm_entry/index.rst", "autoapi/neural_compressor/torch/quantization/autotune/index.rst", "autoapi/neural_compressor/torch/quantization/config/index.rst", "autoapi/neural_compressor/torch/quantization/index.rst", "autoapi/neural_compressor/torch/quantization/load_entry/index.rst", "autoapi/neural_compressor/torch/quantization/quantize/index.rst", "autoapi/neural_compressor/torch/utils/auto_accelerator/index.rst", "autoapi/neural_compressor/torch/utils/bit_packer/index.rst", "autoapi/neural_compressor/torch/utils/constants/index.rst", "autoapi/neural_compressor/torch/utils/environ/index.rst", "autoapi/neural_compressor/torch/utils/index.rst", "autoapi/neural_compressor/torch/utils/utility/index.rst", "autoapi/neural_compressor/training/index.rst", "autoapi/neural_compressor/transformers/quantization/utils/index.rst", "autoapi/neural_compressor/transformers/utils/index.rst", "autoapi/neural_compressor/transformers/utils/quantization_config/index.rst", "autoapi/neural_compressor/utils/collect_layer_histogram/index.rst", "autoapi/neural_compressor/utils/constant/index.rst", "autoapi/neural_compressor/utils/create_obj_from_config/index.rst", "autoapi/neural_compressor/utils/export/index.rst", "autoapi/neural_compressor/utils/export/qlinear2qdq/index.rst", "autoapi/neural_compressor/utils/export/tf2onnx/index.rst", "autoapi/neural_compressor/utils/export/torch2onnx/index.rst", "autoapi/neural_compressor/utils/index.rst", "autoapi/neural_compressor/utils/kl_divergence/index.rst", "autoapi/neural_compressor/utils/load_huggingface/index.rst", "autoapi/neural_compressor/utils/logger/index.rst", "autoapi/neural_compressor/utils/options/index.rst", "autoapi/neural_compressor/utils/pytorch/index.rst", "autoapi/neural_compressor/utils/utility/index.rst", "autoapi/neural_compressor/utils/weights_details/index.rst", "autoapi/neural_compressor/version/index.rst", "docs/build_docs/source/index.rst", "docs/source/2x_user_guide.md", "docs/source/3x/PT_DynamicQuant.md", "docs/source/3x/PT_FP8Quant.md", "docs/source/3x/PT_MXQuant.md", "docs/source/3x/PT_MixedPrecision.md", "docs/source/3x/PT_SmoothQuant.md", "docs/source/3x/PT_StaticQuant.md", "docs/source/3x/PT_WeightOnlyQuant.md", 
"docs/source/3x/PyTorch.md", "docs/source/3x/TF_Quant.md", "docs/source/3x/TF_SQ.md", "docs/source/3x/TensorFlow.md", "docs/source/3x/autotune.md", "docs/source/3x/benchmark.md", "docs/source/3x/client_quant.md", "docs/source/3x/design.md", "docs/source/3x/gaudi_version_map.md", "docs/source/3x/llm_recipes.md", "docs/source/3x/quantization.md", "docs/source/3x/transformers_like_api.md", "docs/source/CODE_OF_CONDUCT.md", "docs/source/CONTRIBUTING.md", "docs/source/FX.md", "docs/source/SECURITY.md", "docs/source/Welcome.md", "docs/source/adaptor.md", "docs/source/add_new_adaptor.md", "docs/source/add_new_data_type.md", "docs/source/api-doc/adaptor.rst", "docs/source/api-doc/adaptor/onnxrt.rst", "docs/source/api-doc/adaptor/torch_utils.rst", "docs/source/api-doc/api_2.rst", "docs/source/api-doc/api_3.rst", "docs/source/api-doc/api_doc_example.rst", "docs/source/api-doc/apis.rst", "docs/source/api-doc/benchmark.rst", "docs/source/api-doc/compression.rst", "docs/source/api-doc/config.rst", "docs/source/api-doc/mix_precision.rst", "docs/source/api-doc/model.rst", "docs/source/api-doc/objective.rst", "docs/source/api-doc/quantization.rst", "docs/source/api-doc/strategy.rst", "docs/source/api-doc/tf_quantization_autotune.rst", "docs/source/api-doc/tf_quantization_common.rst", "docs/source/api-doc/tf_quantization_config.rst", "docs/source/api-doc/torch_quantization_autotune.rst", "docs/source/api-doc/torch_quantization_common.rst", "docs/source/api-doc/torch_quantization_config.rst", "docs/source/api-doc/training.rst", "docs/source/benchmark.md", "docs/source/calibration.md", "docs/source/coding_style.md", "docs/source/dataloader.md", "docs/source/design.md", "docs/source/distillation_quantization.md", "docs/source/distributed.md", "docs/source/examples_readme.md", "docs/source/export.md", "docs/source/faq.md", "docs/source/framework_yaml.md", "docs/source/get_started.md", "docs/source/incompatible_changes.md", "docs/source/infrastructure.md", "docs/source/installation_guide.md", "docs/source/legal_information.md", "docs/source/llm_recipes.md", "docs/source/metric.md", "docs/source/migration.md", "docs/source/mixed_precision.md", "docs/source/model.md", "docs/source/mx_quantization.md", "docs/source/objective.md", "docs/source/orchestration.md", "docs/source/pruning.md", "docs/source/publication_list.md", "docs/source/quantization.md", "docs/source/quantization_layer_wise.md", "docs/source/quantization_mixed_precision.md", "docs/source/quantization_weight_only.md", "docs/source/releases_info.md", "docs/source/sigopt_strategy.md", "docs/source/smooth_quant.md", "docs/source/transform.md", "docs/source/tuning_strategies.md", "docs/source/validated_model_list.md", "index.rst"], "indexentries": {"_epoch_ran (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks._epoch_ran", false]], "acceleratorregistry (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.AcceleratorRegistry", false]], "accuracy (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.Accuracy", false]], "accuracy (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Accuracy", false]], "accuracycriterion (class in neural_compressor.config)": [[195, "neural_compressor.config.AccuracyCriterion", false]], "acq_max() (in module neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.acq_max", false]], 
"activationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.ActivationOperator", false]], "add_port_to_name() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.add_port_to_name", false]], "algorithm (class in neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.Algorithm", false]], "algorithm_registry() (in module neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.algorithm_registry", false]], "algorithms (class in neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.ALGORITHMS", false]], "algorithmscheduler (class in neural_compressor.algorithm.algorithm)": [[146, "neural_compressor.algorithm.algorithm.AlgorithmScheduler", false]], "alias_param() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.alias_param", false]], "alignimagechanneltransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.AlignImageChannelTransform", false]], "alpha (neural_compressor.compression.pruner.regs.grouplasso attribute)": [[189, "neural_compressor.compression.pruner.regs.GroupLasso.alpha", false]], "amp_convert() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.amp_convert", false]], "append_attr() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.append_attr", false]], "apply_awq_clip() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.apply_awq_clip", false]], "apply_awq_scale() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.apply_awq_scale", false]], "apply_inlining() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.apply_inlining", false]], "apply_inlining() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.apply_inlining", false]], "apply_single_pattern_pair() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.apply_single_pattern_pair", false]], "are_shapes_equal() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.are_shapes_equal", false]], "argmaxoperator (class in neural_compressor.adaptor.ox_utils.operators.argmax)": [[6, "neural_compressor.adaptor.ox_utils.operators.argmax.ArgMaxOperator", false]], "assert_error() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.assert_error", false]], "attentionoperator (class in neural_compressor.adaptor.ox_utils.operators.attention)": [[7, "neural_compressor.adaptor.ox_utils.operators.attention.AttentionOperator", false]], "attr1 (neural_compressor.template.api_doc_example.exampleclass attribute)": [[281, "neural_compressor.template.api_doc_example.ExampleClass.attr1", false]], "attr2 (neural_compressor.template.api_doc_example.exampleclass attribute)": [[281, 
"neural_compressor.template.api_doc_example.ExampleClass.attr2", false]], "attr5 (neural_compressor.template.api_doc_example.exampleclass attribute)": [[281, "neural_compressor.template.api_doc_example.ExampleClass.attr5", false]], "attribute1 (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.attribute1", false]], "attribute_to_kwarg() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.attribute_to_kwarg", false]], "auto_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.Auto_Accelerator", false]], "auto_copy() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.auto_copy", false]], "auto_detect_accelerator() (in module neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.auto_detect_accelerator", false]], "autoalpha (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.AutoAlpha", false]], "automixedprecisiontunestrategy (class in neural_compressor.strategy.auto_mixed_precision)": [[264, "neural_compressor.strategy.auto_mixed_precision.AutoMixedPrecisionTuneStrategy", false]], "autoround_quantize_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.autoround_quantize_entry", false]], "autoroundconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.AutoRoundConfig", false]], "autoroundconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.AutoRoundConfig", false]], "autoroundquantizer (class in neural_compressor.torch.algorithms.weight_only.autoround)": [[418, "neural_compressor.torch.algorithms.weight_only.autoround.AutoRoundQuantizer", false]], "autotune() (in module neural_compressor.tensorflow.quantization.autotune)": [[302, "neural_compressor.tensorflow.quantization.autotune.autotune", false]], "autotune() (in module neural_compressor.torch.quantization.autotune)": [[438, "neural_compressor.torch.quantization.autotune.autotune", false]], "autotunestrategy (class in neural_compressor.strategy.auto)": [[263, "neural_compressor.strategy.auto.AutoTuneStrategy", false]], "awq_quantize() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.awq_quantize", false]], "awq_quantize_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.awq_quantize_entry", false]], "awqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.AWQConfig", false]], "awqconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.AwqConfig", false]], "awqquantizer (class in neural_compressor.torch.algorithms.weight_only.awq)": [[419, "neural_compressor.torch.algorithms.weight_only.awq.AWQQuantizer", false]], "axis (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.axis", false]], "basecallbacks (class in 
neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.BaseCallbacks", false]], "baseconfig (class in neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.BaseConfig", false]], "basedataloader (class in neural_compressor.data.dataloaders.base_dataloader)": [[200, "neural_compressor.data.dataloaders.base_dataloader.BaseDataLoader", false]], "basedataloader (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.BaseDataLoader", false]], "basemetric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.BaseMetric", false]], "basemodel (class in neural_compressor.model.base_model)": [[236, "neural_compressor.model.base_model.BaseModel", false]], "basemodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.BaseModel", false]], "basepattern (class in neural_compressor.compression.pruner.patterns.base)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern", false]], "basepruner (class in neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner", false]], "basepruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning", false]], "basereg (class in neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.BaseReg", false]], "basetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.BaseTransform", false]], "basicpruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning", false]], "basictokenizer (class in neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.BasicTokenizer", false]], "basictunestrategy (class in neural_compressor.strategy.basic)": [[265, "neural_compressor.strategy.basic.BasicTuneStrategy", false]], "batchnormalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.norm)": [[20, "neural_compressor.adaptor.ox_utils.operators.norm.BatchNormalizationOperator", false]], "batchsampler (class in neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.BatchSampler", false]], "batchsampler (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.BatchSampler", false]], "bayesianoptimization (class in neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.BayesianOptimization", false]], "bayesiantunestrategy (class in neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.BayesianTuneStrategy", false]], "benchmark() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.benchmark", false]], "benchmark_with_raw_cmd() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.benchmark_with_raw_cmd", false]], "benchmarkconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.BenchmarkConfig", false]], "best_model (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks.best_model", false]], "best_score (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, 
"neural_compressor.compression.callbacks.DistillationCallbacks.best_score", false]], "bf16convert (class in neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert)": [[35, "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert.BF16Convert", false]], "bf16convert (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert)": [[307, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert.BF16Convert", false]], "bf16modulewrapper (class in neural_compressor.adaptor.torch_utils.bf16_convert)": [[134, "neural_compressor.adaptor.torch_utils.bf16_convert.BF16ModuleWrapper", false]], "biascorrection (class in neural_compressor.adaptor.tf_utils.transform_graph.bias_correction)": [[128, "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction.BiasCorrection", false]], "biascorrection (class in neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction)": [[380, "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction.BiasCorrection", false]], "bilinearimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.BilinearImagenetTransform", false]], "binarydirect8bitoperator (class in neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, "neural_compressor.adaptor.ox_utils.operators.binary_op.BinaryDirect8BitOperator", false]], "binaryoperator (class in neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, "neural_compressor.adaptor.ox_utils.operators.binary_op.BinaryOperator", false]], "bleu (class in neural_compressor.metric.bleu)": [[227, "neural_compressor.metric.bleu.BLEU", false]], "bleu_tokenize() (in module neural_compressor.metric.bleu)": [[227, "neural_compressor.metric.bleu.bleu_tokenize", false]], "block_size (neural_compressor.compression.pruner.patterns.nxm.keraspatternnxm attribute)": [[179, "neural_compressor.compression.pruner.patterns.nxm.KerasPatternNxM.block_size", false]], "block_size (neural_compressor.compression.pruner.patterns.nxm.pytorchpatternnxm attribute)": [[179, "neural_compressor.compression.pruner.patterns.nxm.PytorchPatternNxM.block_size", false]], "blockfallbacktuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.BlockFallbackTuningSampler", false]], "blockmaskcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.BlockMaskCriterion", false]], "build_captured_dataloader() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.build_captured_dataloader", false]], "build_slave_faker_model() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.build_slave_faker_model", false]], "bypass_reshape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.bypass_reshape", false]], "bypass_reshape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.bypass_reshape", false]], "bypass_reshape() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.bypass_reshape", false]], "bypass_reshape() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.bypass_reshape", false]], "cal_scale() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.cal_scale", false]], "calculate_md5() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.calculate_md5", false]], "calculate_mse() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.calculate_mse", false]], "calculate_quant_min_max() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.calculate_quant_min_max", false]], "calculate_scale_zp() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.calculate_scale_zp", false]], "calib_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.calib_model", false]], "calib_registry() (in module neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.calib_registry", false]], "calibcollector (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.CalibCollector", false]], "calibdata (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.CalibData", false]], "calibration (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.Calibration", false]], "calibration() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.calibration", false]], "calibratorbase (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.CalibratorBase", false]], "call_counter() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.call_counter", false]], "call_one() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.call_one", false]], "callbacks (class in neural_compressor.training)": [[449, "neural_compressor.training.CallBacks", false]], "captureoutputtofile (class in neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.CaptureOutputToFile", false]], "captureoutputtofile (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.CaptureOutputToFile", false]], "cast_tensor() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.cast_tensor", false]], "castonnxtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CastONNXTransform", false]], "castpytorchtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CastPyTorchTransform", false]], "casttftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CastTFTransform", false]], "centercroptftransform (class in neural_compressor.data.transforms.transform)": [[225, 
"neural_compressor.data.transforms.transform.CenterCropTFTransform", false]], "centercroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CenterCropTransform", false]], "cfg_to_qconfig() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.cfg_to_qconfig", false]], "cfg_to_qconfig() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.cfg_to_qconfig", false]], "check_cfg_and_qconfig() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.check_cfg_and_qconfig", false]], "check_cfg_and_qconfig() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.check_cfg_and_qconfig", false]], "check_cfg_and_qconfig() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.check_cfg_and_qconfig", false]], "check_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.check_config", false]], "check_dataloader() (in module neural_compressor.data.dataloaders.dataloader)": [[201, "neural_compressor.data.dataloaders.dataloader.check_dataloader", false]], "check_integrity() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.check_integrity", false]], "check_key_exist() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.check_key_exist", false]], "check_key_validity() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.check_key_validity", false]], "check_model() (in module neural_compressor.utils.export.qlinear2qdq)": [[457, "neural_compressor.utils.export.qlinear2qdq.check_model", false]], "check_mx_version() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.check_mx_version", false]], "checkpoint_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.checkpoint_session", false]], "checkpoint_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.checkpoint_session", false]], "cifar10 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.CIFAR10", false]], "cifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.CIFAR100", false]], "classifierheadsearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher", false]], "classifierheadsearchertf (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF", false]], "classregister (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.ClassRegister", false]], "clean_module_weight() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.clean_module_weight", false]], "cocoevalwrapper (class 
in neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.COCOEvalWrapper", false]], "cocomapv2 (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.COCOmAPv2", false]], "coconpy (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.COCONpy", false]], "cocoraw (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.COCORaw", false]], "cocorecorddataset (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.COCORecordDataset", false]], "cocowrapper (class in neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.COCOWrapper", false]], "collate_preds() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.collate_preds", false]], "collate_tf_preds() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.collate_tf_preds", false]], "collate_tf_preds() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.collate_tf_preds", false]], "collate_torch_preds() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.collate_torch_preds", false]], "collect_layer_inputs() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.collect_layer_inputs", false]], "collect_weight_info() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.collect_weight_info", false]], "collectorbase (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.CollectorBase", false]], "collecttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CollectTransform", false]], "combine_capabilities() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.combine_capabilities", false]], "combine_histogram() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.combine_histogram", false]], "combine_histogram() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.combine_histogram", false]], "compare_label (neural_compressor.metric.metric.mae attribute)": [[234, "neural_compressor.metric.metric.MAE.compare_label", false]], "compare_label (neural_compressor.metric.metric.mse attribute)": [[234, "neural_compressor.metric.metric.MSE.compare_label", false]], "compare_objects() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.compare_objects", false]], "compare_weights() (in module neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.compare_weights", false]], "composableconfig (class in neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.ComposableConfig", false]], "composetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ComposeTransform", false]], "compressionmanager (class in neural_compressor.training)": [[449, "neural_compressor.training.CompressionManager", false]], "compute_bleu() (in module 
neural_compressor.metric.bleu_util)": [[228, "neural_compressor.metric.bleu_util.compute_bleu", false]], "compute_const_folding_using_tf() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.compute_const_folding_using_tf", false]], "compute_sparsity() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.compute_sparsity", false]], "concatoperator (class in neural_compressor.adaptor.ox_utils.operators.concat)": [[9, "neural_compressor.adaptor.ox_utils.operators.concat.ConcatOperator", false]], "config (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.config", false]], "config (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.config", false]], "config (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.config", false]], "config (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.config", false]], "config (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.config", false]], "config (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.config", false]], "config (neural_compressor.compression.pruner.schedulers.pruningscheduler attribute)": [[190, "neural_compressor.compression.pruner.schedulers.PruningScheduler.config", false]], "config_file_path (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.config_file_path", false]], "config_file_path (neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.config_file_path", false]], "config_file_path (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.config_file_path", false]], "config_instance() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.config_instance", false]], "config_list (neural_compressor.common.base_config.composableconfig attribute)": [[152, "neural_compressor.common.base_config.ComposableConfig.config_list", false]], "config_list (neural_compressor.common.base_tuning.configset attribute)": [[153, "neural_compressor.common.base_tuning.ConfigSet.config_list", false]], "config_quantizable_layers() (in module neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer)": [[103, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer.config_quantizable_layers", false]], "configloader (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.ConfigLoader", false]], "configregistry (class in neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.ConfigRegistry", false]], "configset (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.ConfigSet", 
false]], "conservativetunestrategy (class in neural_compressor.strategy.conservative)": [[267, "neural_compressor.strategy.conservative.ConservativeTuneStrategy", false]], "construct_function_from_graph_def() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.construct_function_from_graph_def", false]], "construct_function_from_graph_def() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.construct_function_from_graph_def", false]], "convert() (in module neural_compressor.adaptor.torch_utils.bf16_convert)": [[134, "neural_compressor.adaptor.torch_utils.bf16_convert.Convert", false]], "convert() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.convert", false]], "convert_by_vocab() (in module neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.convert_by_vocab", false]], "convert_examples_to_features() (in module neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.convert_examples_to_features", false]], "convert_examples_to_features() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.convert_examples_to_features", false]], "convert_tensorflow_tensor_to_onnx() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.convert_tensorflow_tensor_to_onnx", false]], "convert_to_unicode() (in module neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.convert_to_unicode", false]], "convertaddtobiasaddoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd)": [[38, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd.ConvertAddToBiasAddOptimizer", false]], "convertaddtobiasaddoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd)": [[310, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd.ConvertAddToBiasAddOptimizer", false]], "convertlayoutoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout)": [[39, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout.ConvertLayoutOptimizer", false]], "convertlayoutoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout)": [[311, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout.ConvertLayoutOptimizer", false]], "convertleakyreluoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu)": [[40, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu.ConvertLeakyReluOptimizer", false]], "convertleakyreluoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu)": [[312, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu.ConvertLeakyReluOptimizer", false]], "convertnantorandom (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random)": [[41, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random.ConvertNanToRandom", false]], 
"convertnantorandom (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random)": [[313, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random.ConvertNanToRandom", false]], "convertplaceholdertoconst (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const)": [[42, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const.ConvertPlaceholderToConst", false]], "convertplaceholdertoconst (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const)": [[314, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const.ConvertPlaceholderToConst", false]], "convoperator (class in neural_compressor.adaptor.ox_utils.operators.conv)": [[10, "neural_compressor.adaptor.ox_utils.operators.conv.ConvOperator", false]], "cpu_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.CPU_Accelerator", false]], "cpuinfo (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.CpuInfo", false]], "cpuinfo (class in neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.CpuInfo", false]], "cpuinfo (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.CpuInfo", false]], "create_data_example() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.create_data_example", false]], "create_dataloader() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_dataloader", false]], "create_dataset() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_dataset", false]], "create_eval_func() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_eval_func", false]], "create_onnx_config() (in module neural_compressor.profiling.profiler.onnxrt_profiler.utils)": [[257, "neural_compressor.profiling.profiler.onnxrt_profiler.utils.create_onnx_config", false]], "create_quant_spec_from_config() (in module neural_compressor.torch.algorithms.pt2e_quant.utility)": [[409, "neural_compressor.torch.algorithms.pt2e_quant.utility.create_quant_spec_from_config", false]], "create_tf_config() (in module neural_compressor.profiling.profiler.tensorflow_profiler.utils)": [[261, "neural_compressor.profiling.profiler.tensorflow_profiler.utils.create_tf_config", false]], "create_train_func() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.create_train_func", false]], "create_xiq_quantizer_from_pt2e_config() (in module neural_compressor.torch.algorithms.pt2e_quant.utility)": [[409, "neural_compressor.torch.algorithms.pt2e_quant.utility.create_xiq_quantizer_from_pt2e_config", false]], "criterion (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.criterion", false]], "criterion (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.criterion", false]], "criterion 
(neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.criterion", false]], "criterion (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.criterion", false]], "criterion_registry() (in module neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.criterion_registry", false]], "criterions (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.Criterions", false]], "cropresizetftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CropResizeTFTransform", false]], "cropresizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CropResizeTransform", false]], "croptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.CropToBoundingBox", false]], "cuda_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.CUDA_Accelerator", false]], "current_pattern (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.current_pattern", false]], "current_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.current_sparsity_ratio", false]], "current_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.current_sparsity_ratio", false]], "current_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.current_sparsity_ratio", false]], "dataiterloader (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.DataIterLoader", false]], "dataloader (class in neural_compressor.data.dataloaders.dataloader)": [[201, "neural_compressor.data.dataloaders.dataloader.DataLoader", false]], "dataloaderwrap (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.DataLoaderWrap", false]], "dataset (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.Dataset", false]], "dataset (neural_compressor.metric.coco_tools.cocowrapper attribute)": [[230, "neural_compressor.metric.coco_tools.COCOWrapper.dataset", false]], "dataset_registry() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.dataset_registry", false]], "datasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.Datasets", false]], "debug() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.debug", false]], "deep_get() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.deep_get", false]], "deep_get() (in module 
neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.deep_get", false]], "deep_set() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.deep_set", false]], "default_collate() (in module neural_compressor.data.dataloaders.default_dataloader)": [[202, "neural_compressor.data.dataloaders.default_dataloader.default_collate", false]], "default_collate() (in module neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.default_collate", false]], "defaultdataloader (class in neural_compressor.data.dataloaders.default_dataloader)": [[202, "neural_compressor.data.dataloaders.default_dataloader.DefaultDataLoader", false]], "delete_assign() (in module neural_compressor.profiling.profiler.tensorflow_profiler.utils)": [[261, "neural_compressor.profiling.profiler.tensorflow_profiler.utils.delete_assign", false]], "dequantize() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.Dequantize", false]], "dequantize_data() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.dequantize_data", false]], "dequantize_data_with_scale_zero() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.dequantize_data_with_scale_zero", false]], "dequantize_weight() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dequantize_weight", false]], "dequantizecastoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer)": [[36, "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer.DequantizeCastOptimizer", false]], "dequantizecastoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer)": [[308, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer.DequantizeCastOptimizer", false]], "detect_device() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.detect_device", false]], "detect_processor_type_based_on_hw() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.detect_processor_type_based_on_hw", false]], "detection_type (neural_compressor.metric.coco_tools.cocowrapper attribute)": [[230, "neural_compressor.metric.coco_tools.COCOWrapper.detection_type", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, 
"neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.device", false]], "device (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompression attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression.device", false]], "device_synchronize() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.device_synchronize", false]], "dilatedcontraction (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction)": [[43, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction.DilatedContraction", false]], "dilatedcontraction (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction)": [[315, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction.DilatedContraction", false]], "direct8bitoperator (class in neural_compressor.adaptor.ox_utils.operators.direct_q8)": [[11, "neural_compressor.adaptor.ox_utils.operators.direct_q8.Direct8BitOperator", false]], "disable_random() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.disable_random", false]], "disable_random() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.disable_random", false]], "distillationcallbacks (class in neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks", false]], "distillationconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.DistillationConfig", false]], "distribute_calib_tensors() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.distribute_calib_tensors", false]], "dotdict (class in neural_compressor.config)": [[195, "neural_compressor.config.DotDict", false]], "dotdict (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.DotDict", false]], "dowload_hf_model() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.dowload_hf_model", false]], "dowload_hf_model() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.dowload_hf_model", false]], "dowload_hf_model() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.dowload_hf_model", false]], "download_url() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.download_url", false]], "dtype_to_name() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.dtype_to_name", false]], "dummydataset (class in neural_compressor.data.datasets.dummy_dataset)": [[212, "neural_compressor.data.datasets.dummy_dataset.DummyDataset", false]], "dummydataset (class in neural_compressor.data.datasets.dummy_dataset_v2)": [[213, "neural_compressor.data.datasets.dummy_dataset_v2.DummyDataset", false]], "dummydataset (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.DummyDataset", false]], "dummydatasetv2 (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.DummyDatasetV2", false]], "dump_class_attrs() (in module neural_compressor.utils.utility)": [[466, 
"neural_compressor.utils.utility.dump_class_attrs", false]], "dump_data_to_local() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_data_to_local", false]], "dump_elapsed_time() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.dump_elapsed_time", false]], "dump_elapsed_time() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.dump_elapsed_time", false]], "dump_elapsed_time() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_elapsed_time", false]], "dump_model_op_stats() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.dump_model_op_stats", false]], "dump_model_op_stats() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.dump_model_op_stats", false]], "dump_model_op_stats() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.dump_model_op_stats", false]], "dump_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.dump_numa_info", false]], "dump_table() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_table", false]], "dump_table_to_csv() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.dump_table_to_csv", false]], "dynamic_quant_export() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.dynamic_quant_export", false]], "dynamicquantconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.DynamicQuantConfig", false]], "elemformat (class in neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.ElemFormat", false]], "embedlayernormalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.embed_layernorm)": [[12, "neural_compressor.adaptor.ox_utils.operators.embed_layernorm.EmbedLayerNormalizationOperator", false]], "end_step (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.end_step", false]], "end_step (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.end_step", false]], "end_step (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.end_step", false]], "enough_memo_store_scale() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.enough_memo_store_scale", false]], "ensure_list() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.ensure_list", false]], "equal_dicts() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.equal_dicts", false]], "error() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.error", false]], "estimator_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.estimator_session", false]], "estimator_session() 
(in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.estimator_session", false]], "eval_frequency (neural_compressor.compression.callbacks.distillationcallbacks attribute)": [[162, "neural_compressor.compression.callbacks.DistillationCallbacks.eval_frequency", false]], "evaluate() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.evaluate", false]], "evaluate() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.evaluate", false]], "evaluationfuncwrapper (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.EvaluationFuncWrapper", false]], "evaluator (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.Evaluator", false]], "exact_match_score() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.exact_match_score", false]], "exampleclass (class in neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.ExampleClass", false]], "exhaustivetunestrategy (class in neural_compressor.strategy.exhaustive)": [[268, "neural_compressor.strategy.exhaustive.ExhaustiveTuneStrategy", false]], "expanddimsoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer)": [[45, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer.ExpandDimsOptimizer", false]], "expanddimsoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer)": [[317, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer.ExpandDimsOptimizer", false]], "export() (in module neural_compressor.torch.export.pt2e_export)": [[435, "neural_compressor.torch.export.pt2e_export.export", false]], "export_compressed_model() (in module neural_compressor.utils.load_huggingface)": [[462, "neural_compressor.utils.load_huggingface.export_compressed_model", false]], "export_model_for_pt2e_quant() (in module neural_compressor.torch.export.pt2e_export)": [[435, "neural_compressor.torch.export.pt2e_export.export_model_for_pt2e_quant", false]], "exportconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.ExportConfig", false]], "exportsingleimagedetectionboxestococo() (in module neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.ExportSingleImageDetectionBoxesToCoco", false]], "exportsingleimagedetectionmaskstococo() (in module neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.ExportSingleImageDetectionMasksToCoco", false]], "exportsingleimagegroundtruthtococo() (in module neural_compressor.metric.coco_tools)": [[230, "neural_compressor.metric.coco_tools.ExportSingleImageGroundtruthToCoco", false]], "extract_data_type() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.extract_data_type", false]], "f1 (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.F1", false]], "f1_score() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.f1_score", false]], "f1_score() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.f1_score", false]], "fakeaffinetensorquantfunction (class in neural_compressor.adaptor.torch_utils.model_wrapper)": [[142, 
"neural_compressor.adaptor.torch_utils.model_wrapper.FakeAffineTensorQuantFunction", false]], "fakeaffinetensorquantfunction (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.FakeAffineTensorQuantFunction", false]], "fakequantize (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize)": [[98, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize.FakeQuantize", false]], "fakequantizebase (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize)": [[98, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize.FakeQuantizeBase", false]], "fallbacktuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.FallbackTuningSampler", false]], "fashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.FashionMNIST", false]], "fastbiascorrection (class in neural_compressor.algorithm.fast_bias_correction)": [[147, "neural_compressor.algorithm.fast_bias_correction.FastBiasCorrection", false]], "fatal() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.fatal", false]], "fault_tolerant_file() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.fault_tolerant_file", false]], "fetch_module() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.fetch_module", false]], "fetch_module() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.fetch_module", false]], "fetch_module() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.fetch_module", false]], "fetcher (class in neural_compressor.data.dataloaders.fetcher)": [[203, "neural_compressor.data.dataloaders.fetcher.Fetcher", false]], "fetchweightfromreshapeoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape)": [[46, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape.FetchWeightFromReshapeOptimizer", false]], "fetchweightfromreshapeoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape)": [[318, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape.FetchWeightFromReshapeOptimizer", false]], "filter (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.Filter", false]], "filter_fn() (in module neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.filter_fn", false]], "filter_registry() (in module neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.filter_registry", false]], "filters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.FILTERS", false]], "finalize_calibration() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.finalize_calibration", false]], "find_by_name() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.find_by_name", false]], "find_layers() (in module neural_compressor.compression.pruner.wanda.utils)": [[194, 
"neural_compressor.compression.pruner.wanda.utils.find_layers", false]], "find_layers() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.find_layers", false]], "find_layers_name() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.find_layers_name", false]], "find_opset() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.find_opset", false]], "fit() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.fit", false]], "fit() (in module neural_compressor.mix_precision)": [[235, "neural_compressor.mix_precision.fit", false]], "fit() (in module neural_compressor.quantization)": [[262, "neural_compressor.quantization.fit", false]], "fit() (in module neural_compressor.training)": [[449, "neural_compressor.training.fit", false]], "fix_ref_type_of_graph_def() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.fix_ref_type_of_graph_def", false]], "fix_ref_type_of_graph_def() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.fix_ref_type_of_graph_def", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.flatten_static_graph", false]], "flatten_static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.flatten_static_graph", false]], "float16activationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.Float16ActivationOperator", false]], "float16binaryoperator (class in neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, "neural_compressor.adaptor.ox_utils.operators.binary_op.Float16BinaryOperator", false]], "float_to_bfloat16() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.float_to_bfloat16", false]], "float_to_float16() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.float_to_float16", false]], "fn (neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.patternpair attribute)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair.fn", false]], "foldbatchnormnodesoptimizer (class in 
neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm)": [[47, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm.FoldBatchNormNodesOptimizer", false]], "foldbatchnormnodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm)": [[319, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm.FoldBatchNormNodesOptimizer", false]], "footprint (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Footprint", false]], "format_list2str() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.format_list2str", false]], "forward_wrapper() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.forward_wrapper", false]], "forward_wrapper() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.forward_wrapper", false]], "forward_wrapper() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.forward_wrapper", false]], "fp8_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.fp8_entry", false]], "fp8config (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.FP8Config", false]], "framework_datasets (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.framework_datasets", false]], "freezefakequantopoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant)": [[73, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant.FreezeFakeQuantOpOptimizer", false]], "freezefakequantopoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant)": [[345, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant.FreezeFakeQuantOpOptimizer", false]], "freezevaluetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value)": [[74, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value.FreezeValueTransformer", false]], "freezevaluetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value)": [[346, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value.FreezeValueTransformer", false]], "freezevaluewithoutcalibtransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib)": [[75, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib.FreezeValueWithoutCalibTransformer", false]], "frozen_pb_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.frozen_pb_session", false]], "frozen_pb_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.frozen_pb_session", false]], "fulltokenizer (class in neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.FullTokenizer", false]], "function1() (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.function1", false]], "function2() (in module 
neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.function2", false]], "function3() (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.function3", false]], "fuse() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.fuse", false]], "fusebiasaddandaddoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add)": [[49, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add.FuseBiasAddAndAddOptimizer", false]], "fusebiasaddandaddoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add)": [[321, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add.FuseBiasAddAndAddOptimizer", false]], "fusecolumnwisemuloptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul)": [[50, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul.FuseColumnWiseMulOptimizer", false]], "fusecolumnwisemuloptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul)": [[322, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul.FuseColumnWiseMulOptimizer", false]], "fuseconvredundantdequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize)": [[76, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize.FuseConvRedundantDequantizeTransformer", false]], "fuseconvredundantdequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize)": [[347, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize.FuseConvRedundantDequantizeTransformer", false]], "fuseconvrequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize)": [[77, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize.FuseConvRequantizeTransformer", false]], "fuseconvrequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize)": [[348, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize.FuseConvRequantizeTransformer", false]], "fuseconvwithmathoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math)": [[51, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math.FuseConvWithMathOptimizer", false]], "fuseconvwithmathoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math)": [[323, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math.FuseConvWithMathOptimizer", false]], "fusedecomposedbnoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.FuseDecomposedBNOptimizer", false]], "fusedecomposedbnoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.FuseDecomposedBNOptimizer", false]], "fusedecomposedinoptimizer (class 
in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.FuseDecomposedINOptimizer", false]], "fusedecomposedinoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.FuseDecomposedINOptimizer", false]], "fusedmatmuloperator (class in neural_compressor.adaptor.ox_utils.operators.matmul)": [[18, "neural_compressor.adaptor.ox_utils.operators.matmul.FusedMatMulOperator", false]], "fusegeluoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu)": [[54, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu.FuseGeluOptimizer", false]], "fusegeluoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu)": [[326, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu.FuseGeluOptimizer", false]], "fuselayernormoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.FuseLayerNormOptimizer", false]], "fuselayernormoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.FuseLayerNormOptimizer", false]], "fusematmulredundantdequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize)": [[78, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize.FuseMatMulRedundantDequantizeTransformer", false]], "fusematmulredundantdequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize)": [[349, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize.FuseMatMulRedundantDequantizeTransformer", false]], "fusematmulrequantizedequantizenewapitransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeNewAPITransformer", false]], "fusematmulrequantizedequantizenewapitransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeNewAPITransformer", false]], "fusematmulrequantizedequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeTransformer", false]], "fusematmulrequantizedequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeDequantizeTransformer", false]], "fusematmulrequantizenewapitransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, 
"neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeNewAPITransformer", false]], "fusematmulrequantizenewapitransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeNewAPITransformer", false]], "fusematmulrequantizetransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize)": [[79, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeTransformer", false]], "fusematmulrequantizetransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize)": [[350, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize.FuseMatMulRequantizeTransformer", false]], "fusenodestartwithconcatv2 (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2)": [[109, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2.FuseNodeStartWithConcatV2", false]], "fusenodestartwithconcatv2 (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2)": [[119, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2.FuseNodeStartWithConcatV2", false]], "fusenodestartwithconcatv2 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2)": [[364, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2.FuseNodeStartWithConcatV2", false]], "fusenodestartwithconcatv2 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2)": [[374, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2.FuseNodeStartWithConcatV2", false]], "fusenodestartwithconv2d (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv)": [[110, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithconv2d (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv)": [[120, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithconv2d (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv)": [[365, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithconv2d (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv)": [[375, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv.FuseNodeStartWithConv2d", false]], "fusenodestartwithdeconv2d (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv)": [[111, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv.FuseNodeStartWithDeconv2d", false]], "fusenodestartwithdeconv2d (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv)": [[366, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv.FuseNodeStartWithDeconv2d", false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn)": [[108, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn.FuseNodeStartWithFusedBatchNormV3", 
false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn)": [[118, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn.FuseNodeStartWithFusedBatchNormV3", false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn)": [[363, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn.FuseNodeStartWithFusedBatchNormV3", false]], "fusenodestartwithfusedbatchnormv3 (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn)": [[373, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn.FuseNodeStartWithFusedBatchNormV3", false]], "fusenodestartwithfusedinstancenorm (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in)": [[112, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in.FuseNodeStartWithFusedInstanceNorm", false]], "fusenodestartwithfusedinstancenorm (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in)": [[367, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in.FuseNodeStartWithFusedInstanceNorm", false]], "fusenodestartwithmatmul (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul)": [[113, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithmatmul (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul)": [[122, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithmatmul (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul)": [[368, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithmatmul (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul)": [[377, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul.FuseNodeStartWithMatmul", false]], "fusenodestartwithpooling (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling)": [[114, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling.FuseNodeStartWithPooling", false]], "fusenodestartwithpooling (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling)": [[123, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling.FuseNodeStartWithPooling", false]], "fusenodestartwithpooling (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling)": [[369, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling.FuseNodeStartWithPooling", false]], "fusenodestartwithpooling (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling)": [[378, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling.FuseNodeStartWithPooling", false]], "fusepadwithconv2doptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv)": [[56, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv.FusePadWithConv2DOptimizer", false]], "fusepadwithconv2doptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv)": [[328, 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv.FusePadWithConv2DOptimizer", false]], "fusepadwithfp32conv2doptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv)": [[57, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv.FusePadWithFP32Conv2DOptimizer", false]], "fusepadwithfp32conv2doptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv)": [[329, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv.FusePadWithFP32Conv2DOptimizer", false]], "fusetransposereshapeoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose)": [[58, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose.FuseTransposeReshapeOptimizer", false]], "fusetransposereshapeoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose)": [[330, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose.FuseTransposeReshapeOptimizer", false]], "gatheroperator (class in neural_compressor.adaptor.ox_utils.operators.gather)": [[13, "neural_compressor.adaptor.ox_utils.operators.gather.GatherOperator", false]], "gemmoperator (class in neural_compressor.adaptor.ox_utils.operators.gemm)": [[15, "neural_compressor.adaptor.ox_utils.operators.gemm.GemmOperator", false]], "gen_bar_updater() (in module neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.gen_bar_updater", false]], "generaltopk (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.GeneralTopK", false]], "generate_activation_observer() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.generate_activation_observer", false]], "generate_activation_observer() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.generate_activation_observer", false]], "generate_feed_dict() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.generate_feed_dict", false]], "generate_feed_dict() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.generate_feed_dict", false]], "generate_ffn2_pruning_config() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.generate_ffn2_pruning_config", false]], "generate_mha_pruning_config() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.generate_mha_pruning_config", false]], "generate_prefix() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.generate_prefix", false]], "generate_prefix() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.generate_prefix", false]], "generate_xpu_qconfig() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.generate_xpu_qconfig", false]], "generategraphwithqdqpattern (class in neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern)": [[92, 
"neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern.GenerateGraphWithQDQPattern", false]], "generategraphwithqdqpattern (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern)": [[357, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern.GenerateGraphWithQDQPattern", false]], "generator1() (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.generator1", false]], "get_absorb_layers() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_absorb_layers", false]], "get_absorb_layers() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_absorb_layers", false]], "get_accelerator() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.get_accelerator", false]], "get_activation() (in module neural_compressor.compression.distillation.utility)": [[166, "neural_compressor.compression.distillation.utility.get_activation", false]], "get_adaptor_name() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.get_adaptor_name", false]], "get_algorithm() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_algorithm", false]], "get_all_config_set() (in module neural_compressor.tensorflow.quantization.autotune)": [[302, "neural_compressor.tensorflow.quantization.autotune.get_all_config_set", false]], "get_all_config_set() (in module neural_compressor.torch.quantization.autotune)": [[438, "neural_compressor.torch.quantization.autotune.get_all_config_set", false]], "get_all_config_set_from_config_registry() (in module neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.get_all_config_set_from_config_registry", false]], "get_all_fp32_data() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.get_all_fp32_data", false]], "get_all_fp32_data() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_all_fp32_data", false]], "get_all_registered_configs() (in module neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.get_all_registered_configs", false]], "get_all_registered_configs() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_all_registered_configs", false]], "get_architecture() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_architecture", false]], "get_attributes() (in module neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.get_attributes", false]], "get_blob_size() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.get_blob_size", false]], "get_block_names() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_block_names", false]], "get_block_prefix() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_block_prefix", false]], "get_block_prefix() (in module neural_compressor.torch.algorithms.weight_only.utility)": 
[[433, "neural_compressor.torch.algorithms.weight_only.utility.get_block_prefix", false]], "get_bounded_threads() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_bounded_threads", false]], "get_children() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_children", false]], "get_children() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_children", false]], "get_common_module() (in module neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.get_common_module", false]], "get_const_dim_count() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.get_const_dim_count", false]], "get_const_dim_count() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.get_const_dim_count", false]], "get_const_dim_count() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.get_const_dim_count", false]], "get_const_dim_count() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.get_const_dim_count", false]], "get_core_ids() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_core_ids", false]], "get_criterion() (in module neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.get_criterion", false]], "get_dataloader() (in module neural_compressor.torch.algorithms.weight_only.autoround)": [[418, "neural_compressor.torch.algorithms.weight_only.autoround.get_dataloader", false]], "get_default_autoround_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_AutoRound_config", false]], "get_default_awq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_awq_config", false]], "get_default_double_quant_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_double_quant_config", false]], "get_default_dynamic_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_dynamic_config", false]], "get_default_fp8_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_fp8_config", false]], "get_default_fp8_config_set() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_fp8_config_set", false]], "get_default_gptq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_gptq_config", false]], "get_default_hqq_config() (in module neural_compressor.torch.quantization.config)": [[439, 
"neural_compressor.torch.quantization.config.get_default_hqq_config", false]], "get_default_mixed_precision_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_mixed_precision_config", false]], "get_default_mixed_precision_config_set() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_mixed_precision_config_set", false]], "get_default_mx_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_mx_config", false]], "get_default_rtn_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_rtn_config", false]], "get_default_sq_config() (in module neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.get_default_sq_config", false]], "get_default_sq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_sq_config", false]], "get_default_static_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_static_config", false]], "get_default_static_quant_config() (in module neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.get_default_static_quant_config", false]], "get_default_static_quant_config() (in module neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.get_default_static_quant_config", false]], "get_default_teq_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_default_teq_config", false]], "get_depth() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_depth", false]], "get_depth() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_depth", false]], "get_dict_at_depth() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_dict_at_depth", false]], "get_dict_at_depth() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_dict_at_depth", false]], "get_double_quant_config_dict() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_double_quant_config_dict", false]], "get_element_under_depth() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_element_under_depth", false]], "get_element_under_depth() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_element_under_depth", false]], "get_embedding_contiguous() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_embedding_contiguous", false]], "get_estimator_graph() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_estimator_graph", false]], "get_example_input() (in module neural_compressor.adaptor.torch_utils.util)": [[145, 
"neural_compressor.adaptor.torch_utils.util.get_example_input", false]], "get_fallback_order() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_fallback_order", false]], "get_filter_fn() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.get_filter_fn", false]], "get_final_text() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.get_final_text", false]], "get_framework_name() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.get_framework_name", false]], "get_func_from_config() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_func_from_config", false]], "get_graph_def() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_graph_def", false]], "get_graph_def() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_graph_def", false]], "get_half_precision_node_set() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.get_half_precision_node_set", false]], "get_hidden_states() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_hidden_states", false]], "get_index_from_strided_slice_of_shape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_index_from_strided_slice_of_shape", false]], "get_input_output_node_names() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_input_output_node_names", false]], "get_input_output_node_names() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_input_output_node_names", false]], "get_ipex_version() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.get_ipex_version", false]], "get_layer_names_in_block() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_layer_names_in_block", false]], "get_layers() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.get_layers", false]], "get_linux_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_linux_numa_info", false]], "get_max_supported_opset_version() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema)": [[89, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema.get_max_supported_opset_version", false]], "get_metrics() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_metrics", false]], "get_model_device() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_model_device", false]], "get_model_fwk_name() (in module neural_compressor.model.model)": [[239, "neural_compressor.model.model.get_model_fwk_name", false]], "get_model_info() (in module 
neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_model_info", false]], "get_model_input_shape() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_model_input_shape", false]], "get_model_input_shape() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_model_input_shape", false]], "get_model_type() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.get_model_type", false]], "get_model_type() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.get_model_type", false]], "get_module() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_module", false]], "get_module() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_module", false]], "get_module() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.get_module", false]], "get_module() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_module", false]], "get_module() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_module", false]], "get_module_input_output() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_module_input_output", false]], "get_module_input_output() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_module_input_output", false]], "get_mse_order_per_fp32() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_mse_order_per_fp32", false]], "get_mse_order_per_int8() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_mse_order_per_int8", false]], "get_multimodal_block_names() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_multimodal_block_names", false]], "get_named_children() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_named_children", false]], "get_named_children() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_named_children", false]], "get_node_mapping() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.get_node_mapping", false]], "get_node_original_name() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.get_node_original_name", false]], "get_numa_node() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_numa_node", false]], "get_number_of_sockets() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_number_of_sockets", false]], "get_op_list() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_op_list", false]], 
"get_op_type_by_name() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_op_type_by_name", false]], "get_parent() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.get_parent", false]], "get_parent() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.get_parent", false]], "get_pattern() (in module neural_compressor.compression.pruner.patterns)": [[176, "neural_compressor.compression.pruner.patterns.get_pattern", false]], "get_physical_ids() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_physical_ids", false]], "get_postprocess() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_postprocess", false]], "get_preprocess() (in module neural_compressor.utils.create_obj_from_config)": [[455, "neural_compressor.utils.create_obj_from_config.get_preprocess", false]], "get_processor_type_from_user_config() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_processor_type_from_user_config", false]], "get_pruner() (in module neural_compressor.compression.pruner.pruners)": [[183, "neural_compressor.compression.pruner.pruners.get_pruner", false]], "get_quant_dequant_output() (in module neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, "neural_compressor.adaptor.ox_utils.smooth_quant.get_quant_dequant_output", false]], "get_quantizable_onnx_ops() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.get_quantizable_onnx_ops", false]], "get_quantizable_ops_from_cfgs() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_quantizable_ops_from_cfgs", false]], "get_quantizable_ops_from_cfgs() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_quantizable_ops_from_cfgs", false]], "get_quantizable_ops_recursively() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.get_quantizable_ops_recursively", false]], "get_quantizable_ops_recursively() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.get_quantizable_ops_recursively", false]], "get_quantizer() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.get_quantizer", false]], "get_reg() (in module neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.get_reg", false]], "get_reg_type() (in module neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.get_reg_type", false]], "get_reversed_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_reversed_numa_info", false]], "get_rtn_double_quant_config_set() (in module neural_compressor.torch.quantization.autotune)": [[438, "neural_compressor.torch.quantization.autotune.get_rtn_double_quant_config_set", false]], "get_scheduler() (in module neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.get_scheduler", false]], "get_schema() (in module 
neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema)": [[89, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema.get_schema", false]], "get_size() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_size", false]], "get_sparsity_ratio() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.get_sparsity_ratio", false]], "get_sparsity_ratio_tf() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.get_sparsity_ratio_tf", false]], "get_subgraphs_from_onnx() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_subgraphs_from_onnx", false]], "get_super_module_by_name() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.get_super_module_by_name", false]], "get_super_module_by_name() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.get_super_module_by_name", false]], "get_tensor_by_name() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_tensor_by_name", false]], "get_tensor_by_name() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.get_tensor_by_name", false]], "get_tensor_histogram() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.get_tensor_histogram", false]], "get_tensor_histogram() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_tensor_histogram", false]], "get_tensor_val_from_graph_node() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_tensor_val_from_graph_node", false]], "get_tensorflow_node_attr() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_node_attr", false]], "get_tensorflow_node_shape_attr() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_node_shape_attr", false]], "get_tensorflow_tensor_data() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_tensor_data", false]], "get_tensorflow_tensor_shape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.get_tensorflow_tensor_shape", false]], "get_tensors_info() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_tensors_info", false]], "get_tf_criterion() (in module neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.get_tf_criterion", false]], "get_tf_model_type() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.get_tf_model_type", false]], "get_threads() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_threads", 
false]], "get_threads_per_core() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.get_threads_per_core", false]], "get_torch_version() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.get_torch_version", false]], "get_torch_version() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.get_torch_version", false]], "get_torchvision_map() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.get_torchvision_map", false]], "get_tuning_history() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_tuning_history", false]], "get_unquantized_node_set() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.get_unquantized_node_set", false]], "get_weight_from_input_tensor() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.get_weight_from_input_tensor", false]], "get_weight_scale() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.get_weight_scale", false]], "get_weights_details() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.get_weights_details", false]], "get_windows_numa_info() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.get_windows_numa_info", false]], "get_woq_tuning_config() (in module neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.get_woq_tuning_config", false]], "get_workspace() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.get_workspace", false]], "global_state (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.GLOBAL_STATE", false]], "global_step (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.global_step", false]], "global_step (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.global_step", false]], "global_step (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.global_step", false]], "globalaveragepooloperator (class in neural_compressor.adaptor.ox_utils.operators.gavgpool)": [[14, "neural_compressor.adaptor.ox_utils.operators.gavgpool.GlobalAveragePoolOperator", false]], "gptq (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.GPTQ", false]], "gptq() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.gptq", false]], "gptq_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.gptq_entry", false]], "gptq_quantize() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.gptq_quantize", false]], "gptqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.GPTQConfig", false]], "gptqconfig 
(class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.GPTQConfig", false]], "gptquantizer (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.GPTQuantizer", false]], "gradientcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.GradientCriterion", false]], "graph_def_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.graph_def_session", false]], "graph_def_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.graph_def_session", false]], "graph_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.graph_session", false]], "graph_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.graph_session", false]], "graphanalyzer (class in neural_compressor.adaptor.tf_utils.graph_util)": [[95, "neural_compressor.adaptor.tf_utils.graph_util.GraphAnalyzer", false]], "graphanalyzer (class in neural_compressor.tensorflow.quantization.utils.graph_util)": [[360, "neural_compressor.tensorflow.quantization.utils.graph_util.GraphAnalyzer", false]], "graphconverter (class in neural_compressor.adaptor.tf_utils.graph_converter)": [[33, "neural_compressor.adaptor.tf_utils.graph_converter.GraphConverter", false]], "graphconverter (class in neural_compressor.tensorflow.quantization.utils.graph_converter)": [[306, "neural_compressor.tensorflow.quantization.utils.graph_converter.GraphConverter", false]], "graphconverterwithoutcalib (class in neural_compressor.adaptor.tf_utils.graph_converter_without_calib)": [[34, "neural_compressor.adaptor.tf_utils.graph_converter_without_calib.GraphConverterWithoutCalib", false]], "graphcseoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer)": [[59, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer.GraphCseOptimizer", false]], "graphcseoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer)": [[331, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer.GraphCseOptimizer", false]], "graphfoldconstantoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant)": [[48, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant.GraphFoldConstantOptimizer", false]], "graphfoldconstantoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant)": [[320, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant.GraphFoldConstantOptimizer", false]], "graphrewriterbase (class in neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base)": [[71, "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base.GraphRewriterBase", false]], "graphrewriterbase (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base)": [[343, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base.GraphRewriterBase", false]], "graphrewriterhelper (class in neural_compressor.adaptor.tf_utils.graph_util)": [[95, "neural_compressor.adaptor.tf_utils.graph_util.GraphRewriterHelper", false]], 
"graphrewriterhelper (class in neural_compressor.tensorflow.quantization.utils.graph_util)": [[360, "neural_compressor.tensorflow.quantization.utils.graph_util.GraphRewriterHelper", false]], "graphtrace (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.GraphTrace", false]], "graphtrace (class in neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.GraphTrace", false]], "graphtransformbase (class in neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base)": [[129, "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base.GraphTransformBase", false]], "graphtransformbase (class in neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base)": [[381, "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base.GraphTransformBase", false]], "grappleroptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass)": [[60, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass.GrapplerOptimizer", false]], "grappleroptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass)": [[332, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass.GrapplerOptimizer", false]], "group_size (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.group_size", false]], "grouplasso (class in neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.GroupLasso", false]], "halfprecisionconverter (class in neural_compressor.torch.algorithms.mixed_precision.half_precision_convert)": [[399, "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert.HalfPrecisionConverter", false]], "halfprecisionmodulewrapper (class in neural_compressor.torch.algorithms.mixed_precision.module_wrappers)": [[401, "neural_compressor.torch.algorithms.mixed_precision.module_wrappers.HalfPrecisionModuleWrapper", false]], "hawq_top() (in module neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.hawq_top", false]], "hawq_v2tunestrategy (class in neural_compressor.strategy.hawq_v2)": [[269, "neural_compressor.strategy.hawq_v2.HAWQ_V2TuneStrategy", false]], "head_masks (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.head_masks", false]], "hessiantrace (class in neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.HessianTrace", false]], "histogramcollector (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.HistogramCollector", false]], "hpoconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.HPOConfig", false]], "hpu_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.HPU_Accelerator", false]], "hpuweightonlylinear (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.HPUWeightOnlyLinear", false]], "hqq_entry() (in module 
neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.hqq_entry", false]], "hqqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.HQQConfig", false]], "hqqlinear (class in neural_compressor.torch.algorithms.weight_only.hqq.core)": [[423, "neural_compressor.torch.algorithms.weight_only.hqq.core.HQQLinear", false]], "hqqmoduleconfig (class in neural_compressor.torch.algorithms.weight_only.hqq.config)": [[422, "neural_compressor.torch.algorithms.weight_only.hqq.config.HQQModuleConfig", false]], "hqqtensorhandle (class in neural_compressor.torch.algorithms.weight_only.hqq.core)": [[423, "neural_compressor.torch.algorithms.weight_only.hqq.core.HQQTensorHandle", false]], "hqquantizer (class in neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.HQQuantizer", false]], "imagefolder (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.ImageFolder", false]], "imagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.ImagenetRaw", false]], "incquantizationconfigmixin (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.INCQuantizationConfigMixin", false]], "incweightonlylinear (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.INCWeightOnlyLinear", false]], "indexfetcher (class in neural_compressor.data.dataloaders.fetcher)": [[203, "neural_compressor.data.dataloaders.fetcher.IndexFetcher", false]], "indexfetcher (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.IndexFetcher", false]], "infer_onnx_shape_dtype() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.infer_onnx_shape_dtype", false]], "infer_shapes() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.infer_shapes", false]], "info() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.info", false]], "init_quantize_config() (in module neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper)": [[101, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper.init_quantize_config", false]], "init_tuning() (in module neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.init_tuning", false]], "initial_tuning_cfg_with_quant_mode() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.initial_tuning_cfg_with_quant_mode", false]], "initialize_int8_avgpool() (in module neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.initialize_int8_avgpool", false]], "initialize_int8_conv2d() (in module neural_compressor.tensorflow.keras.layers.conv2d)": [[292, "neural_compressor.tensorflow.keras.layers.conv2d.initialize_int8_conv2d", false]], "initialize_int8_dense() (in module neural_compressor.tensorflow.keras.layers.dense)": [[293, "neural_compressor.tensorflow.keras.layers.dense.initialize_int8_dense", false]], "initialize_int8_depthwise_conv2d() (in module 
neural_compressor.tensorflow.keras.layers.depthwise_conv2d)": [[294, "neural_compressor.tensorflow.keras.layers.depthwise_conv2d.initialize_int8_depthwise_conv2d", false]], "initialize_int8_maxpool() (in module neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.initialize_int8_maxpool", false]], "initialize_int8_separable_conv2d() (in module neural_compressor.tensorflow.keras.layers.separable_conv2d)": [[298, "neural_compressor.tensorflow.keras.layers.separable_conv2d.initialize_int8_separable_conv2d", false]], "initialize_name_counter() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.initialize_name_counter", false]], "injectdummybiasaddoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd)": [[44, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd.InjectDummyBiasAddOptimizer", false]], "injectdummybiasaddoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd)": [[316, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd.InjectDummyBiasAddOptimizer", false]], "input2tuple() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.input2tuple", false]], "inputfeatures (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.InputFeatures", false]], "inputfeatures (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.InputFeatures", false]], "insertlogging (class in neural_compressor.adaptor.tf_utils.transform_graph.insert_logging)": [[131, "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging.InsertLogging", false]], "insertlogging (class in neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging)": [[383, "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging.InsertLogging", false]], "insertprintminmaxnode (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node)": [[62, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node.InsertPrintMinMaxNode", false]], "insertprintminmaxnode (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node)": [[334, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node.InsertPrintMinMaxNode", false]], "int8_node_name_reverse() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.int8_node_name_reverse", false]], "intermediatelayersknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.IntermediateLayersKnowledgeDistillationLoss", false]], "intermediatelayersknowledgedistillationlossconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.IntermediateLayersKnowledgeDistillationLossConfig", false]], "invalid_layers (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.invalid_layers", false]], "invalid_layers (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, 
"neural_compressor.compression.pruner.patterns.base.KerasBasePattern.invalid_layers", false]], "invalid_layers (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.invalid_layers", false]], "ipexmodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.IPEXModel", false]], "is_b_transposed() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.is_B_transposed", false]], "is_ckpt_format() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.is_ckpt_format", false]], "is_ckpt_format() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.is_ckpt_format", false]], "is_fused_module() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.is_fused_module", false]], "is_global (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.is_global", false]], "is_global (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.is_global", false]], "is_global (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.is_global", false]], "is_hpex_available() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_hpex_available", false]], "is_int8_model() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.is_int8_model", false]], "is_ipex_available() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_ipex_available", false]], "is_ipex_imported() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_ipex_imported", false]], "is_leaf() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.is_leaf", false]], "is_list_or_tuple() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.is_list_or_tuple", false]], "is_model_quantized() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.is_model_quantized", false]], "is_onnx_domain() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.is_onnx_domain", false]], "is_optimum_habana_available() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.is_optimum_habana_available", false]], "is_package_available() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_package_available", false]], "is_saved_model_format() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.is_saved_model_format", false]], "is_saved_model_format() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, 
"neural_compressor.tensorflow.quantization.utils.utility.is_saved_model_format", false]], "is_transformers_imported() (in module neural_compressor.torch.utils.environ)": [[446, "neural_compressor.torch.utils.environ.is_transformers_imported", false]], "isiterable() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.isiterable", false]], "iterabledataset (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.IterableDataset", false]], "iterablefetcher (class in neural_compressor.data.dataloaders.fetcher)": [[203, "neural_compressor.data.dataloaders.fetcher.IterableFetcher", false]], "iterablefetcher (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.IterableFetcher", false]], "iterablesampler (class in neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.IterableSampler", false]], "iterablesampler (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.IterableSampler", false]], "iterativescheduler (class in neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.IterativeScheduler", false]], "iterator_sess_run() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.iterator_sess_run", false]], "iterator_sess_run() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.iterator_sess_run", false]], "itex_installed() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.itex_installed", false]], "jitbasicsearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher", false]], "k (neural_compressor.metric.metric.generaltopk attribute)": [[234, "neural_compressor.metric.metric.GeneralTopK.k", false]], "k (neural_compressor.metric.metric.tensorflowtopk attribute)": [[234, "neural_compressor.metric.metric.TensorflowTopK.k", false]], "keep_mask_layers (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.keep_mask_layers", false]], "keep_mask_layers (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.keep_mask_layers", false]], "keep_mask_layers (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.keep_mask_layers", false]], "keras (class in neural_compressor.config)": [[195, "neural_compressor.config.Keras", false]], "keras_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.keras_session", false]], "keras_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.keras_session", false]], "kerasadaptor (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasAdaptor", false]], "kerasbasepattern (class in neural_compressor.compression.pruner.patterns.base)": [[175, 
"neural_compressor.compression.pruner.patterns.base.KerasBasePattern", false]], "kerasbasepruner (class in neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner", false]], "kerasbasicpruner (class in neural_compressor.compression.pruner.pruners.basic)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner", false]], "kerasconfigconverter (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasConfigConverter", false]], "kerasmodel (class in neural_compressor.model.keras_model)": [[238, "neural_compressor.model.keras_model.KerasModel", false]], "kerasmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.KerasModel", false]], "keraspatternnxm (class in neural_compressor.compression.pruner.patterns.nxm)": [[179, "neural_compressor.compression.pruner.patterns.nxm.KerasPatternNxM", false]], "kerasquery (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasQuery", false]], "kerassurgery (class in neural_compressor.tensorflow.algorithms.static_quant.keras)": [[288, "neural_compressor.tensorflow.algorithms.static_quant.keras.KerasSurgery", false]], "kl_divergence (class in neural_compressor.utils.kl_divergence)": [[461, "neural_compressor.utils.kl_divergence.KL_Divergence", false]], "klcalibrator (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.KLCalibrator", false]], "knowledgedistillationframework (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.KnowledgeDistillationFramework", false]], "knowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.KnowledgeDistillationLoss", false]], "knowledgedistillationlossconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.KnowledgeDistillationLossConfig", false]], "label_list (neural_compressor.metric.metric.accuracy attribute)": [[234, "neural_compressor.metric.metric.Accuracy.label_list", false]], "label_list (neural_compressor.metric.metric.mae attribute)": [[234, "neural_compressor.metric.metric.MAE.label_list", false]], "label_list (neural_compressor.metric.metric.mse attribute)": [[234, "neural_compressor.metric.metric.MSE.label_list", false]], "labelbalancecocorawfilter (class in neural_compressor.data.filters.coco_filter)": [[217, "neural_compressor.data.filters.coco_filter.LabelBalanceCOCORawFilter", false]], "labelbalancecocorecordfilter (class in neural_compressor.data.filters.coco_filter)": [[217, "neural_compressor.data.filters.coco_filter.LabelBalanceCOCORecordFilter", false]], "labels (neural_compressor.metric.bleu.bleu attribute)": [[227, "neural_compressor.metric.bleu.BLEU.labels", false]], "labelshift (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.LabelShift", false]], "layer_1 (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompression attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression.layer_1", false]], "layer_2 (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompression 
attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression.layer_2", false]], "layerhistogramcollector (class in neural_compressor.utils.collect_layer_histogram)": [[453, "neural_compressor.utils.collect_layer_histogram.LayerHistogramCollector", false]], "layerwisequant (class in neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize)": [[139, "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize.LayerWiseQuant", false]], "lazyimport (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.LazyImport", false]], "lazyimport (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.LazyImport", false]], "linear2linearsearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher", false]], "linear_layers (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.linear_layers", false]], "linear_patterns (neural_compressor.compression.pruner.model_slim.weight_slim.linearcompressioniterator attribute)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompressionIterator.linear_patterns", false]], "linearcompression (class in neural_compressor.compression.pruner.model_slim.weight_slim)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression", false]], "linearcompressioniterator (class in neural_compressor.compression.pruner.model_slim.weight_slim)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompressionIterator", false]], "load() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load)": [[140, "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load.load", false]], "load() (in module neural_compressor.torch.algorithms.layer_wise.load)": [[396, "neural_compressor.torch.algorithms.layer_wise.load.load", false]], "load() (in module neural_compressor.torch.algorithms.pt2e_quant.save_load)": [[408, "neural_compressor.torch.algorithms.pt2e_quant.save_load.load", false]], "load() (in module neural_compressor.torch.algorithms.static_quant.save_load)": [[415, "neural_compressor.torch.algorithms.static_quant.save_load.load", false]], "load() (in module neural_compressor.torch.algorithms.weight_only.save_load)": [[431, "neural_compressor.torch.algorithms.weight_only.save_load.load", false]], "load() (in module neural_compressor.torch.quantization.load_entry)": [[441, "neural_compressor.torch.quantization.load_entry.load", false]], "load() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.load", false]], "load_and_cache_examples() (in module neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.load_and_cache_examples", false]], "load_config_mapping() (in module neural_compressor.common.utils.save_load)": [[160, "neural_compressor.common.utils.save_load.load_config_mapping", false]], "load_data_from_pkl() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.load_data_from_pkl", false]], "load_empty_model() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_empty_model", false]], "load_empty_model() (in module 
neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_empty_model", false]], "load_empty_model() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.load_empty_model", false]], "load_layer_wise_quantized_model() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_layer_wise_quantized_model", false]], "load_layer_wise_quantized_model() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_layer_wise_quantized_model", false]], "load_module() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_module", false]], "load_saved_model() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.load_saved_model", false]], "load_saved_model() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.load_saved_model", false]], "load_tensor() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_tensor", false]], "load_tensor() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_tensor", false]], "load_tensor_from_shard() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.load_tensor_from_shard", false]], "load_tensor_from_shard() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_tensor_from_shard", false]], "load_value() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.load_value", false]], "load_vocab() (in module neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.load_vocab", false]], "load_weight_only() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.load_weight_only", false]], "loadformat (class in neural_compressor.torch.utils.constants)": [[445, "neural_compressor.torch.utils.constants.LoadFormat", false]], "log() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.log", false]], "log_process() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.log_process", false]], "log_quantizable_layers_per_transformer() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.log_quantizable_layers_per_transformer", false]], "logger (class in neural_compressor.common.utils.logger)": [[159, "neural_compressor.common.utils.logger.Logger", false]], "logger (class in neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.Logger", false]], "loss (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.Loss", false]], "lowerbitssampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.LowerBitsSampler", false]], "lstmoperator (class in 
neural_compressor.adaptor.ox_utils.operators.lstm)": [[17, "neural_compressor.adaptor.ox_utils.operators.lstm.LSTMOperator", false]], "m (neural_compressor.compression.pruner.patterns.mha.patternmha attribute)": [[177, "neural_compressor.compression.pruner.patterns.mha.PatternMHA.M", false]], "m (neural_compressor.compression.pruner.patterns.ninm.pytorchpatternninm attribute)": [[178, "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM.M", false]], "mae (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.MAE", false]], "magnitudecriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.MagnitudeCriterion", false]], "magnitudecriterion (class in neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.MagnitudeCriterion", false]], "make_dquant_node() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.make_dquant_node", false]], "make_matmul_weight_only_node() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.make_matmul_weight_only_node", false]], "make_module() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.make_module", false]], "make_nc_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.make_nc_model", false]], "make_onnx_inputs_outputs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.make_onnx_inputs_outputs", false]], "make_onnx_shape() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.make_onnx_shape", false]], "make_quant_node() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.make_quant_node", false]], "make_sub_graph() (in module neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, "neural_compressor.adaptor.ox_utils.smooth_quant.make_sub_graph", false]], "make_symbol_block() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.make_symbol_block", false]], "map_numpy_to_onnx_dtype() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.map_numpy_to_onnx_dtype", false]], "map_onnx_to_numpy_type() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.map_onnx_to_numpy_type", false]], "map_tensorflow_dtype() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.map_tensorflow_dtype", false]], "masks (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.masks", false]], "masks (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.masks", false]], "masks (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, 
"neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.masks", false]], "match_datatype_pattern() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.match_datatype_pattern", false]], "matmuloperator (class in neural_compressor.adaptor.ox_utils.operators.matmul)": [[18, "neural_compressor.adaptor.ox_utils.operators.matmul.MatMulOperator", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.max_sparsity_ratio_per_op", false]], "max_sparsity_ratio_per_op (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.max_sparsity_ratio_per_op", false]], "maxpooloperator (class in neural_compressor.adaptor.ox_utils.operators.maxpool)": [[19, "neural_compressor.adaptor.ox_utils.operators.maxpool.MaxPoolOperator", false]], "mergeduplicatedqdqoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq)": [[93, "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq.MergeDuplicatedQDQOptimizer", false]], "mergeduplicatedqdqoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq)": [[358, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq.MergeDuplicatedQDQOptimizer", false]], "metainfochangingmemopoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer)": [[81, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer.MetaInfoChangingMemOpOptimizer", false]], "metainfochangingmemopoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer)": [[352, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer.MetaInfoChangingMemOpOptimizer", false]], "metric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.Metric", false]], "metric_max_over_ground_truths() (in module neural_compressor.metric.evaluate_squad)": [[231, "neural_compressor.metric.evaluate_squad.metric_max_over_ground_truths", false]], "metric_max_over_ground_truths() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.metric_max_over_ground_truths", false]], "metric_registry() (in module neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.metric_registry", false]], "metrics (class in neural_compressor.metric.metric)": [[234, 
"neural_compressor.metric.metric.METRICS", false]], "metrics (neural_compressor.metric.metric.metrics attribute)": [[234, "neural_compressor.metric.metric.METRICS.metrics", false]], "metrics (neural_compressor.metric.metric.mxnetmetrics attribute)": [[234, "neural_compressor.metric.metric.MXNetMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.onnxrtitmetrics attribute)": [[234, "neural_compressor.metric.metric.ONNXRTITMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.onnxrtqlmetrics attribute)": [[234, "neural_compressor.metric.metric.ONNXRTQLMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.pytorchmetrics attribute)": [[234, "neural_compressor.metric.metric.PyTorchMetrics.metrics", false]], "metrics (neural_compressor.metric.metric.tensorflowmetrics attribute)": [[234, "neural_compressor.metric.metric.TensorflowMetrics.metrics", false]], "mha_compressions (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.mha_compressions", false]], "mha_scores (neural_compressor.compression.pruner.pruners.mha.pythonmultiheadattentionpruner attribute)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner.mha_scores", false]], "min_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.min_sparsity_ratio_per_op", false]], "min_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.min_sparsity_ratio_per_op", false]], "min_sparsity_ratio_per_op (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.min_sparsity_ratio_per_op", false]], "minmaxcalibrator (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.MinMaxCalibrator", false]], "miou (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.mIOU", false]], "mixed_precision_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.mixed_precision_entry", false]], "mixedprecisionconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.MixedPrecisionConfig", false]], "mixedprecisionconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.MixedPrecisionConfig", false]], "mnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MNIST", false]], "mode (class in neural_compressor.common.utils.constants)": [[157, "neural_compressor.common.utils.constants.Mode", false]], "mode (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.MODE", false]], "model (class in neural_compressor.model.model)": [[239, "neural_compressor.model.model.Model", false]], "model (class in neural_compressor.tensorflow.utils.model)": [[389, "neural_compressor.tensorflow.utils.model.Model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.model", 
false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.model", false]], "model (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.model", false]], "model (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.model", false]], "model (neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.model", false]], "model (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.model", false]], "model_forward() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.model_forward", false]], "model_forward() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.model_forward", false]], "model_forward_per_sample() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.model_forward_per_sample", false]], "model_level (neural_compressor.common.tuning_param.paramlevel attribute)": [[156, "neural_compressor.common.tuning_param.ParamLevel.MODEL_LEVEL", false]], "model_slim() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.model_slim", false]], "model_slim_ffn2() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.model_slim_ffn2", false]], "model_slim_mha() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.model_slim_mha", false]], "modelsize (class in neural_compressor.objective)": [[245, "neural_compressor.objective.ModelSize", false]], "modelwisetuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.ModelWiseTuningSampler", false]], "module": [[0, "module-neural_compressor.adaptor.mxnet_utils", false], [1, "module-neural_compressor.adaptor.mxnet_utils.util", false], [2, "module-neural_compressor.adaptor.ox_utils.calibration", false], [3, "module-neural_compressor.adaptor.ox_utils.calibrator", false], [4, "module-neural_compressor.adaptor.ox_utils", false], [5, "module-neural_compressor.adaptor.ox_utils.operators.activation", false], [6, 
"module-neural_compressor.adaptor.ox_utils.operators.argmax", false], [7, "module-neural_compressor.adaptor.ox_utils.operators.attention", false], [8, "module-neural_compressor.adaptor.ox_utils.operators.binary_op", false], [9, "module-neural_compressor.adaptor.ox_utils.operators.concat", false], [10, "module-neural_compressor.adaptor.ox_utils.operators.conv", false], [11, "module-neural_compressor.adaptor.ox_utils.operators.direct_q8", false], [12, "module-neural_compressor.adaptor.ox_utils.operators.embed_layernorm", false], [13, "module-neural_compressor.adaptor.ox_utils.operators.gather", false], [14, "module-neural_compressor.adaptor.ox_utils.operators.gavgpool", false], [15, "module-neural_compressor.adaptor.ox_utils.operators.gemm", false], [16, "module-neural_compressor.adaptor.ox_utils.operators", false], [17, "module-neural_compressor.adaptor.ox_utils.operators.lstm", false], [18, "module-neural_compressor.adaptor.ox_utils.operators.matmul", false], [19, "module-neural_compressor.adaptor.ox_utils.operators.maxpool", false], [20, "module-neural_compressor.adaptor.ox_utils.operators.norm", false], [21, "module-neural_compressor.adaptor.ox_utils.operators.ops", false], [22, "module-neural_compressor.adaptor.ox_utils.operators.pad", false], [23, "module-neural_compressor.adaptor.ox_utils.operators.pooling", false], [24, "module-neural_compressor.adaptor.ox_utils.operators.reduce", false], [25, "module-neural_compressor.adaptor.ox_utils.operators.resize", false], [26, "module-neural_compressor.adaptor.ox_utils.operators.split", false], [27, "module-neural_compressor.adaptor.ox_utils.operators.unary_op", false], [28, "module-neural_compressor.adaptor.ox_utils.quantizer", false], [29, "module-neural_compressor.adaptor.ox_utils.smooth_quant", false], [30, "module-neural_compressor.adaptor.ox_utils.util", false], [31, "module-neural_compressor.adaptor.ox_utils.weight_only", false], [32, "module-neural_compressor.adaptor.tensorflow", false], [33, "module-neural_compressor.adaptor.tf_utils.graph_converter", false], [34, "module-neural_compressor.adaptor.tf_utils.graph_converter_without_calib", false], [35, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert", false], [36, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer", false], [37, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16", false], [38, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd", false], [39, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout", false], [40, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu", false], [41, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random", false], [42, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const", false], [43, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction", false], [44, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd", false], [45, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer", false], [46, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape", false], [47, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm", false], [48, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant", false], [49, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add", false], [50, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul", false], [51, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math", false], [52, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn", false], [53, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in", false], [54, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu", false], [55, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm", false], [56, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv", false], [57, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false], [58, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose", false], [59, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer", false], [60, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass", false], [61, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic", false], [62, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node", false], [63, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu", false], [64, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize", false], [65, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes", false], [66, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm", false], [67, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input", false], [68, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes", false], [69, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes", false], [70, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer", false], [71, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base", false], [72, "module-neural_compressor.adaptor.tf_utils.graph_rewriter", false], [73, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant", false], [74, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value", false], [75, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib", false], [76, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false], [77, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize", false], [78, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false], [79, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize", false], [80, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8", false], [81, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer", false], [82, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter", false], [83, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse", false], [84, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert", false], [85, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation", false], [86, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx", false], [87, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph", false], [88, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node", false], [89, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema", false], [90, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils", false], [91, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq", false], [92, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern", false], [93, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq", false], [94, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern", false], [95, "module-neural_compressor.adaptor.tf_utils.graph_util", false], [96, "module-neural_compressor.adaptor.tf_utils", false], [97, "module-neural_compressor.adaptor.tf_utils.quantize_graph", false], [98, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize", false], [99, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat", false], [100, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config", false], [101, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper", false], [102, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers", false], [103, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer", false], [104, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add", false], [105, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base", false], [106, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn", false], [107, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper", false], [108, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn", false], [109, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2", false], [110, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv", false], [111, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv", false], [112, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in", false], [113, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul", false], [114, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling", false], [115, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq", false], [116, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq", false], [117, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base", false], [118, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn", false], [119, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2", false], [120, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv", false], [121, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu", false], [122, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul", false], [123, 
"module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling", false], [124, "module-neural_compressor.adaptor.tf_utils.quantize_graph_common", false], [125, "module-neural_compressor.adaptor.tf_utils.smooth_quant_calibration", false], [126, "module-neural_compressor.adaptor.tf_utils.smooth_quant_scaler", false], [127, "module-neural_compressor.adaptor.tf_utils.tf2onnx_converter", false], [128, "module-neural_compressor.adaptor.tf_utils.transform_graph.bias_correction", false], [129, "module-neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base", false], [130, "module-neural_compressor.adaptor.tf_utils.transform_graph", false], [131, "module-neural_compressor.adaptor.tf_utils.transform_graph.insert_logging", false], [132, "module-neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat", false], [133, "module-neural_compressor.adaptor.tf_utils.util", false], [134, "module-neural_compressor.adaptor.torch_utils.bf16_convert", false], [135, "module-neural_compressor.adaptor.torch_utils.hawq_metric", false], [136, "module-neural_compressor.adaptor.torch_utils", false], [137, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant", false], [138, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle", false], [139, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize", false], [140, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load", false], [141, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.utils", false], [142, "module-neural_compressor.adaptor.torch_utils.model_wrapper", false], [143, "module-neural_compressor.adaptor.torch_utils.pattern_detector", false], [144, "module-neural_compressor.adaptor.torch_utils.symbolic_trace", false], [145, "module-neural_compressor.adaptor.torch_utils.util", false], [146, "module-neural_compressor.algorithm.algorithm", false], [147, "module-neural_compressor.algorithm.fast_bias_correction", false], [148, "module-neural_compressor.algorithm", false], [149, "module-neural_compressor.algorithm.smooth_quant", false], [150, "module-neural_compressor.algorithm.weight_correction", false], [151, "module-neural_compressor.benchmark", false], [152, "module-neural_compressor.common.base_config", false], [153, "module-neural_compressor.common.base_tuning", false], [154, "module-neural_compressor.common.benchmark", false], [155, "module-neural_compressor.common", false], [156, "module-neural_compressor.common.tuning_param", false], [157, "module-neural_compressor.common.utils.constants", false], [158, "module-neural_compressor.common.utils", false], [159, "module-neural_compressor.common.utils.logger", false], [160, "module-neural_compressor.common.utils.save_load", false], [161, "module-neural_compressor.common.utils.utility", false], [162, "module-neural_compressor.compression.callbacks", false], [163, "module-neural_compressor.compression.distillation.criterions", false], [164, "module-neural_compressor.compression.distillation", false], [165, "module-neural_compressor.compression.distillation.optimizers", false], [166, "module-neural_compressor.compression.distillation.utility", false], [167, "module-neural_compressor.compression.hpo", false], [168, "module-neural_compressor.compression.hpo.sa_optimizer", false], [169, "module-neural_compressor.compression.pruner.criteria", false], [170, "module-neural_compressor.compression.pruner", false], [171, "module-neural_compressor.compression.pruner.model_slim.auto_slim", false], 
[172, "module-neural_compressor.compression.pruner.model_slim", false], [173, "module-neural_compressor.compression.pruner.model_slim.pattern_analyzer", false], [174, "module-neural_compressor.compression.pruner.model_slim.weight_slim", false], [175, "module-neural_compressor.compression.pruner.patterns.base", false], [176, "module-neural_compressor.compression.pruner.patterns", false], [177, "module-neural_compressor.compression.pruner.patterns.mha", false], [178, "module-neural_compressor.compression.pruner.patterns.ninm", false], [179, "module-neural_compressor.compression.pruner.patterns.nxm", false], [180, "module-neural_compressor.compression.pruner.pruners.base", false], [181, "module-neural_compressor.compression.pruner.pruners.basic", false], [182, "module-neural_compressor.compression.pruner.pruners.block_mask", false], [183, "module-neural_compressor.compression.pruner.pruners", false], [184, "module-neural_compressor.compression.pruner.pruners.mha", false], [185, "module-neural_compressor.compression.pruner.pruners.pattern_lock", false], [186, "module-neural_compressor.compression.pruner.pruners.progressive", false], [187, "module-neural_compressor.compression.pruner.pruners.retrain_free", false], [188, "module-neural_compressor.compression.pruner.pruning", false], [189, "module-neural_compressor.compression.pruner.regs", false], [190, "module-neural_compressor.compression.pruner.schedulers", false], [191, "module-neural_compressor.compression.pruner.tf_criteria", false], [192, "module-neural_compressor.compression.pruner.utils", false], [193, "module-neural_compressor.compression.pruner.wanda", false], [194, "module-neural_compressor.compression.pruner.wanda.utils", false], [195, "module-neural_compressor.config", false], [196, "module-neural_compressor.contrib", false], [197, "module-neural_compressor.contrib.strategy", false], [198, "module-neural_compressor.contrib.strategy.sigopt", false], [199, "module-neural_compressor.contrib.strategy.tpe", false], [200, "module-neural_compressor.data.dataloaders.base_dataloader", false], [201, "module-neural_compressor.data.dataloaders.dataloader", false], [202, "module-neural_compressor.data.dataloaders.default_dataloader", false], [203, "module-neural_compressor.data.dataloaders.fetcher", false], [204, "module-neural_compressor.data.dataloaders.mxnet_dataloader", false], [205, "module-neural_compressor.data.dataloaders.onnxrt_dataloader", false], [206, "module-neural_compressor.data.dataloaders.pytorch_dataloader", false], [207, "module-neural_compressor.data.dataloaders.sampler", false], [208, "module-neural_compressor.data.dataloaders.tensorflow_dataloader", false], [209, "module-neural_compressor.data.datasets.bert_dataset", false], [210, "module-neural_compressor.data.datasets.coco_dataset", false], [211, "module-neural_compressor.data.datasets.dataset", false], [212, "module-neural_compressor.data.datasets.dummy_dataset", false], [213, "module-neural_compressor.data.datasets.dummy_dataset_v2", false], [214, "module-neural_compressor.data.datasets.imagenet_dataset", false], [215, "module-neural_compressor.data.datasets", false], [216, "module-neural_compressor.data.datasets.style_transfer_dataset", false], [217, "module-neural_compressor.data.filters.coco_filter", false], [218, "module-neural_compressor.data.filters.filter", false], [219, "module-neural_compressor.data.filters", false], [220, "module-neural_compressor.data", false], [221, "module-neural_compressor.data.transforms.imagenet_transform", false], [222, 
"module-neural_compressor.data.transforms", false], [223, "module-neural_compressor.data.transforms.postprocess", false], [224, "module-neural_compressor.data.transforms.tokenization", false], [225, "module-neural_compressor.data.transforms.transform", false], [226, "module-neural_compressor", false], [227, "module-neural_compressor.metric.bleu", false], [228, "module-neural_compressor.metric.bleu_util", false], [229, "module-neural_compressor.metric.coco_label_map", false], [230, "module-neural_compressor.metric.coco_tools", false], [231, "module-neural_compressor.metric.evaluate_squad", false], [232, "module-neural_compressor.metric.f1", false], [233, "module-neural_compressor.metric", false], [234, "module-neural_compressor.metric.metric", false], [235, "module-neural_compressor.mix_precision", false], [236, "module-neural_compressor.model.base_model", false], [237, "module-neural_compressor.model", false], [238, "module-neural_compressor.model.keras_model", false], [239, "module-neural_compressor.model.model", false], [240, "module-neural_compressor.model.mxnet_model", false], [241, "module-neural_compressor.model.nets_factory", false], [242, "module-neural_compressor.model.onnx_model", false], [243, "module-neural_compressor.model.tensorflow_model", false], [244, "module-neural_compressor.model.torch_model", false], [245, "module-neural_compressor.objective", false], [246, "module-neural_compressor.profiling", false], [247, "module-neural_compressor.profiling.parser.factory", false], [248, "module-neural_compressor.profiling.parser.onnx_parser.factory", false], [249, "module-neural_compressor.profiling.parser.onnx_parser.parser", false], [250, "module-neural_compressor.profiling.parser.parser", false], [251, "module-neural_compressor.profiling.parser.result", false], [252, "module-neural_compressor.profiling.parser.tensorflow_parser.factory", false], [253, "module-neural_compressor.profiling.parser.tensorflow_parser.parser", false], [254, "module-neural_compressor.profiling.profiler.factory", false], [255, "module-neural_compressor.profiling.profiler.onnxrt_profiler.factory", false], [256, "module-neural_compressor.profiling.profiler.onnxrt_profiler.profiler", false], [257, "module-neural_compressor.profiling.profiler.onnxrt_profiler.utils", false], [258, "module-neural_compressor.profiling.profiler.profiler", false], [259, "module-neural_compressor.profiling.profiler.tensorflow_profiler.factory", false], [260, "module-neural_compressor.profiling.profiler.tensorflow_profiler.profiler", false], [261, "module-neural_compressor.profiling.profiler.tensorflow_profiler.utils", false], [262, "module-neural_compressor.quantization", false], [263, "module-neural_compressor.strategy.auto", false], [264, "module-neural_compressor.strategy.auto_mixed_precision", false], [265, "module-neural_compressor.strategy.basic", false], [266, "module-neural_compressor.strategy.bayesian", false], [267, "module-neural_compressor.strategy.conservative", false], [268, "module-neural_compressor.strategy.exhaustive", false], [269, "module-neural_compressor.strategy.hawq_v2", false], [270, "module-neural_compressor.strategy", false], [271, "module-neural_compressor.strategy.mse", false], [272, "module-neural_compressor.strategy.mse_v2", false], [273, "module-neural_compressor.strategy.random", false], [274, "module-neural_compressor.strategy.strategy", false], [275, "module-neural_compressor.strategy.utils.constant", false], [276, "module-neural_compressor.strategy.utils", false], [277, 
"module-neural_compressor.strategy.utils.tuning_sampler", false], [278, "module-neural_compressor.strategy.utils.tuning_space", false], [279, "module-neural_compressor.strategy.utils.tuning_structs", false], [280, "module-neural_compressor.strategy.utils.utility", false], [281, "module-neural_compressor.template.api_doc_example", false], [282, "module-neural_compressor.tensorflow.algorithms", false], [283, "module-neural_compressor.tensorflow.algorithms.smoother.calibration", false], [284, "module-neural_compressor.tensorflow.algorithms.smoother.core", false], [285, "module-neural_compressor.tensorflow.algorithms.smoother", false], [286, "module-neural_compressor.tensorflow.algorithms.smoother.scaler", false], [287, "module-neural_compressor.tensorflow.algorithms.static_quant", false], [288, "module-neural_compressor.tensorflow.algorithms.static_quant.keras", false], [289, "module-neural_compressor.tensorflow.algorithms.static_quant.tensorflow", false], [290, "module-neural_compressor.tensorflow", false], [291, "module-neural_compressor.tensorflow.keras", false], [292, "module-neural_compressor.tensorflow.keras.layers.conv2d", false], [293, "module-neural_compressor.tensorflow.keras.layers.dense", false], [294, "module-neural_compressor.tensorflow.keras.layers.depthwise_conv2d", false], [295, "module-neural_compressor.tensorflow.keras.layers", false], [296, "module-neural_compressor.tensorflow.keras.layers.layer_initializer", false], [297, "module-neural_compressor.tensorflow.keras.layers.pool2d", false], [298, "module-neural_compressor.tensorflow.keras.layers.separable_conv2d", false], [299, "module-neural_compressor.tensorflow.keras.quantization.config", false], [300, "module-neural_compressor.tensorflow.keras.quantization", false], [301, "module-neural_compressor.tensorflow.quantization.algorithm_entry", false], [302, "module-neural_compressor.tensorflow.quantization.autotune", false], [303, "module-neural_compressor.tensorflow.quantization.config", false], [304, "module-neural_compressor.tensorflow.quantization", false], [305, "module-neural_compressor.tensorflow.quantization.quantize", false], [306, "module-neural_compressor.tensorflow.quantization.utils.graph_converter", false], [307, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert", false], [308, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer", false], [309, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16", false], [310, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd", false], [311, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout", false], [312, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu", false], [313, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random", false], [314, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const", false], [315, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction", false], [316, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd", false], [317, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer", false], [318, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape", false], [319, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm", false], [320, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant", false], [321, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add", false], [322, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul", false], [323, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math", false], [324, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn", false], [325, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in", false], [326, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu", false], [327, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm", false], [328, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv", false], [329, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false], [330, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose", false], [331, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer", false], [332, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass", false], [333, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic", false], [334, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node", false], [335, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu", false], [336, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize", false], [337, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes", false], [338, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm", false], [339, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input", false], [340, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes", false], [341, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes", false], [342, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer", false], [343, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base", false], [344, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter", false], [345, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant", false], [346, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value", false], [347, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false], [348, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize", false], [349, 
"module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false], [350, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize", false], [351, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8", false], [352, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer", false], [353, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter", false], [354, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse", false], [355, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation", false], [356, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq", false], [357, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern", false], [358, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq", false], [359, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern", false], [360, "module-neural_compressor.tensorflow.quantization.utils.graph_util", false], [361, "module-neural_compressor.tensorflow.quantization.utils", false], [362, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph", false], [363, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn", false], [364, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2", false], [365, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv", false], [366, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv", false], [367, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in", false], [368, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul", false], [369, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling", false], [370, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq", false], [371, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq", false], [372, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base", false], [373, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn", false], [374, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2", false], [375, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv", false], [376, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu", false], [377, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul", false], [378, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling", false], [379, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph_common", false], [380, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction", false], [381, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base", false], [382, "module-neural_compressor.tensorflow.quantization.utils.transform_graph", false], [383, 
"module-neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging", false], [384, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat", false], [385, "module-neural_compressor.tensorflow.quantization.utils.utility", false], [386, "module-neural_compressor.tensorflow.utils.constants", false], [387, "module-neural_compressor.tensorflow.utils.data", false], [388, "module-neural_compressor.tensorflow.utils", false], [389, "module-neural_compressor.tensorflow.utils.model", false], [390, "module-neural_compressor.tensorflow.utils.model_wrappers", false], [391, "module-neural_compressor.tensorflow.utils.utility", false], [392, "module-neural_compressor.torch.algorithms.base_algorithm", false], [393, "module-neural_compressor.torch.algorithms.fp8_quant.utils.logger", false], [394, "module-neural_compressor.torch.algorithms", false], [395, "module-neural_compressor.torch.algorithms.layer_wise", false], [396, "module-neural_compressor.torch.algorithms.layer_wise.load", false], [397, "module-neural_compressor.torch.algorithms.layer_wise.modified_pickle", false], [398, "module-neural_compressor.torch.algorithms.layer_wise.utils", false], [399, "module-neural_compressor.torch.algorithms.mixed_precision.half_precision_convert", false], [400, "module-neural_compressor.torch.algorithms.mixed_precision", false], [401, "module-neural_compressor.torch.algorithms.mixed_precision.module_wrappers", false], [402, "module-neural_compressor.torch.algorithms.mx_quant", false], [403, "module-neural_compressor.torch.algorithms.mx_quant.mx", false], [404, "module-neural_compressor.torch.algorithms.mx_quant.utils", false], [405, "module-neural_compressor.torch.algorithms.pt2e_quant.core", false], [406, "module-neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter", false], [407, "module-neural_compressor.torch.algorithms.pt2e_quant", false], [408, "module-neural_compressor.torch.algorithms.pt2e_quant.save_load", false], [409, "module-neural_compressor.torch.algorithms.pt2e_quant.utility", false], [410, "module-neural_compressor.torch.algorithms.smooth_quant", false], [411, "module-neural_compressor.torch.algorithms.smooth_quant.save_load", false], [412, "module-neural_compressor.torch.algorithms.smooth_quant.smooth_quant", false], [413, "module-neural_compressor.torch.algorithms.smooth_quant.utility", false], [414, "module-neural_compressor.torch.algorithms.static_quant", false], [415, "module-neural_compressor.torch.algorithms.static_quant.save_load", false], [416, "module-neural_compressor.torch.algorithms.static_quant.static_quant", false], [417, "module-neural_compressor.torch.algorithms.static_quant.utility", false], [418, "module-neural_compressor.torch.algorithms.weight_only.autoround", false], [419, "module-neural_compressor.torch.algorithms.weight_only.awq", false], [420, "module-neural_compressor.torch.algorithms.weight_only.gptq", false], [421, "module-neural_compressor.torch.algorithms.weight_only.hqq.bitpack", false], [422, "module-neural_compressor.torch.algorithms.weight_only.hqq.config", false], [423, "module-neural_compressor.torch.algorithms.weight_only.hqq.core", false], [424, "module-neural_compressor.torch.algorithms.weight_only.hqq", false], [425, "module-neural_compressor.torch.algorithms.weight_only.hqq.optimizer", false], [426, "module-neural_compressor.torch.algorithms.weight_only.hqq.qtensor", false], [427, "module-neural_compressor.torch.algorithms.weight_only.hqq.quantizer", false], [428, 
"module-neural_compressor.torch.algorithms.weight_only", false], [429, "module-neural_compressor.torch.algorithms.weight_only.modules", false], [430, "module-neural_compressor.torch.algorithms.weight_only.rtn", false], [431, "module-neural_compressor.torch.algorithms.weight_only.save_load", false], [432, "module-neural_compressor.torch.algorithms.weight_only.teq", false], [433, "module-neural_compressor.torch.algorithms.weight_only.utility", false], [434, "module-neural_compressor.torch.export", false], [435, "module-neural_compressor.torch.export.pt2e_export", false], [436, "module-neural_compressor.torch", false], [437, "module-neural_compressor.torch.quantization.algorithm_entry", false], [438, "module-neural_compressor.torch.quantization.autotune", false], [439, "module-neural_compressor.torch.quantization.config", false], [440, "module-neural_compressor.torch.quantization", false], [441, "module-neural_compressor.torch.quantization.load_entry", false], [442, "module-neural_compressor.torch.quantization.quantize", false], [443, "module-neural_compressor.torch.utils.auto_accelerator", false], [444, "module-neural_compressor.torch.utils.bit_packer", false], [445, "module-neural_compressor.torch.utils.constants", false], [446, "module-neural_compressor.torch.utils.environ", false], [447, "module-neural_compressor.torch.utils", false], [448, "module-neural_compressor.torch.utils.utility", false], [449, "module-neural_compressor.training", false], [450, "module-neural_compressor.transformers.quantization.utils", false], [451, "module-neural_compressor.transformers.utils", false], [452, "module-neural_compressor.transformers.utils.quantization_config", false], [453, "module-neural_compressor.utils.collect_layer_histogram", false], [454, "module-neural_compressor.utils.constant", false], [455, "module-neural_compressor.utils.create_obj_from_config", false], [456, "module-neural_compressor.utils.export", false], [457, "module-neural_compressor.utils.export.qlinear2qdq", false], [458, "module-neural_compressor.utils.export.tf2onnx", false], [459, "module-neural_compressor.utils.export.torch2onnx", false], [460, "module-neural_compressor.utils", false], [461, "module-neural_compressor.utils.kl_divergence", false], [462, "module-neural_compressor.utils.load_huggingface", false], [463, "module-neural_compressor.utils.logger", false], [464, "module-neural_compressor.utils.options", false], [465, "module-neural_compressor.utils.pytorch", false], [466, "module-neural_compressor.utils.utility", false], [467, "module-neural_compressor.utils.weights_details", false], [468, "module-neural_compressor.version", false]], "module_debug_level1 (in module neural_compressor.template.api_doc_example)": [[281, "neural_compressor.template.api_doc_example.module_debug_level1", false]], "modules (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.modules", false]], "modules (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.modules", false]], "modules (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.modules", false]], "modules (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.modules", false]], "modules 
(neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.modules", false]], "modules (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.modules", false]], "move_input_device() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.move_input_device", false]], "move_input_to_device() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.move_input_to_device", false]], "move_input_to_device() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.move_input_to_device", false]], "movesqueezeafterreluoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu)": [[63, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu.MoveSqueezeAfterReluOptimizer", false]], "movesqueezeafterreluoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu)": [[335, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu.MoveSqueezeAfterReluOptimizer", false]], "mse (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.MSE", false]], "mse (neural_compressor.metric.metric.rmse attribute)": [[234, "neural_compressor.metric.metric.RMSE.mse", false]], "mse_metric_gap() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.mse_metric_gap", false]], "mse_v2tunestrategy (class in neural_compressor.strategy.mse_v2)": [[272, "neural_compressor.strategy.mse_v2.MSE_V2TuneStrategy", false]], "msetunestrategy (class in neural_compressor.strategy.mse)": [[271, "neural_compressor.strategy.mse.MSETuneStrategy", false]], "mullinear (class in neural_compressor.adaptor.torch_utils.model_wrapper)": [[142, "neural_compressor.adaptor.torch_utils.model_wrapper.MulLinear", false]], "mullinear (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.MulLinear", false]], "multiobjective (class in neural_compressor.objective)": [[245, "neural_compressor.objective.MultiObjective", false]], "mx_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.mx_quant_entry", false]], "mxlinear (class in neural_compressor.torch.algorithms.mx_quant.mx)": [[403, "neural_compressor.torch.algorithms.mx_quant.mx.MXLinear", false]], "mxnet (class in neural_compressor.config)": [[195, "neural_compressor.config.MXNet", false]], "mxnetcifar10 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetCIFAR10", false]], "mxnetcifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetCIFAR100", false]], "mxnetcropresizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetCropResizeTransform", false]], "mxnetcroptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetCropToBoundingBox", false]], "mxnetdataloader (class in 
neural_compressor.data.dataloaders.mxnet_dataloader)": [[204, "neural_compressor.data.dataloaders.mxnet_dataloader.MXNetDataLoader", false]], "mxnetdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetDatasets", false]], "mxnetfashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetFashionMNIST", false]], "mxnetfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.MXNetFilters", false]], "mxnetimagefolder (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetImageFolder", false]], "mxnetimagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.MXNetImagenetRaw", false]], "mxnetmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.MXNetMetrics", false]], "mxnetmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.MXNetMNIST", false]], "mxnetmodel (class in neural_compressor.model.mxnet_model)": [[240, "neural_compressor.model.mxnet_model.MXNetModel", false]], "mxnetnormalizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetNormalizeTransform", false]], "mxnettransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetTransforms", false]], "mxnettranspose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.MXNetTranspose", false]], "mxquantconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.MXQuantConfig", false]], "mxquantizer (class in neural_compressor.torch.algorithms.mx_quant.mx)": [[403, "neural_compressor.torch.algorithms.mx_quant.mx.MXQuantizer", false]], "n (neural_compressor.compression.pruner.patterns.mha.patternmha attribute)": [[177, "neural_compressor.compression.pruner.patterns.mha.PatternMHA.N", false]], "n (neural_compressor.compression.pruner.patterns.ninm.pytorchpatternninm attribute)": [[178, "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM.N", false]], "name (neural_compressor.common.base_config.baseconfig attribute)": [[152, "neural_compressor.common.base_config.BaseConfig.name", false]], "namecollector (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.NameCollector", false]], "nasconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.NASConfig", false]], "nbits (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.nbits", false]], "ndarray_to_device() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.ndarray_to_device", false]], "need_apply() (in module neural_compressor.tensorflow.quantization.quantize)": [[305, "neural_compressor.tensorflow.quantization.quantize.need_apply", false]], "need_apply() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.need_apply", false]], "neural_compressor": [[226, "module-neural_compressor", false]], "neural_compressor.adaptor.mxnet_utils": [[0, 
"module-neural_compressor.adaptor.mxnet_utils", false]], "neural_compressor.adaptor.mxnet_utils.util": [[1, "module-neural_compressor.adaptor.mxnet_utils.util", false]], "neural_compressor.adaptor.ox_utils": [[4, "module-neural_compressor.adaptor.ox_utils", false]], "neural_compressor.adaptor.ox_utils.calibration": [[2, "module-neural_compressor.adaptor.ox_utils.calibration", false]], "neural_compressor.adaptor.ox_utils.calibrator": [[3, "module-neural_compressor.adaptor.ox_utils.calibrator", false]], "neural_compressor.adaptor.ox_utils.operators": [[16, "module-neural_compressor.adaptor.ox_utils.operators", false]], "neural_compressor.adaptor.ox_utils.operators.activation": [[5, "module-neural_compressor.adaptor.ox_utils.operators.activation", false]], "neural_compressor.adaptor.ox_utils.operators.argmax": [[6, "module-neural_compressor.adaptor.ox_utils.operators.argmax", false]], "neural_compressor.adaptor.ox_utils.operators.attention": [[7, "module-neural_compressor.adaptor.ox_utils.operators.attention", false]], "neural_compressor.adaptor.ox_utils.operators.binary_op": [[8, "module-neural_compressor.adaptor.ox_utils.operators.binary_op", false]], "neural_compressor.adaptor.ox_utils.operators.concat": [[9, "module-neural_compressor.adaptor.ox_utils.operators.concat", false]], "neural_compressor.adaptor.ox_utils.operators.conv": [[10, "module-neural_compressor.adaptor.ox_utils.operators.conv", false]], "neural_compressor.adaptor.ox_utils.operators.direct_q8": [[11, "module-neural_compressor.adaptor.ox_utils.operators.direct_q8", false]], "neural_compressor.adaptor.ox_utils.operators.embed_layernorm": [[12, "module-neural_compressor.adaptor.ox_utils.operators.embed_layernorm", false]], "neural_compressor.adaptor.ox_utils.operators.gather": [[13, "module-neural_compressor.adaptor.ox_utils.operators.gather", false]], "neural_compressor.adaptor.ox_utils.operators.gavgpool": [[14, "module-neural_compressor.adaptor.ox_utils.operators.gavgpool", false]], "neural_compressor.adaptor.ox_utils.operators.gemm": [[15, "module-neural_compressor.adaptor.ox_utils.operators.gemm", false]], "neural_compressor.adaptor.ox_utils.operators.lstm": [[17, "module-neural_compressor.adaptor.ox_utils.operators.lstm", false]], "neural_compressor.adaptor.ox_utils.operators.matmul": [[18, "module-neural_compressor.adaptor.ox_utils.operators.matmul", false]], "neural_compressor.adaptor.ox_utils.operators.maxpool": [[19, "module-neural_compressor.adaptor.ox_utils.operators.maxpool", false]], "neural_compressor.adaptor.ox_utils.operators.norm": [[20, "module-neural_compressor.adaptor.ox_utils.operators.norm", false]], "neural_compressor.adaptor.ox_utils.operators.ops": [[21, "module-neural_compressor.adaptor.ox_utils.operators.ops", false]], "neural_compressor.adaptor.ox_utils.operators.pad": [[22, "module-neural_compressor.adaptor.ox_utils.operators.pad", false]], "neural_compressor.adaptor.ox_utils.operators.pooling": [[23, "module-neural_compressor.adaptor.ox_utils.operators.pooling", false]], "neural_compressor.adaptor.ox_utils.operators.reduce": [[24, "module-neural_compressor.adaptor.ox_utils.operators.reduce", false]], "neural_compressor.adaptor.ox_utils.operators.resize": [[25, "module-neural_compressor.adaptor.ox_utils.operators.resize", false]], "neural_compressor.adaptor.ox_utils.operators.split": [[26, "module-neural_compressor.adaptor.ox_utils.operators.split", false]], "neural_compressor.adaptor.ox_utils.operators.unary_op": [[27, "module-neural_compressor.adaptor.ox_utils.operators.unary_op", false]], 
"neural_compressor.adaptor.ox_utils.quantizer": [[28, "module-neural_compressor.adaptor.ox_utils.quantizer", false]], "neural_compressor.adaptor.ox_utils.smooth_quant": [[29, "module-neural_compressor.adaptor.ox_utils.smooth_quant", false]], "neural_compressor.adaptor.ox_utils.util": [[30, "module-neural_compressor.adaptor.ox_utils.util", false]], "neural_compressor.adaptor.ox_utils.weight_only": [[31, "module-neural_compressor.adaptor.ox_utils.weight_only", false]], "neural_compressor.adaptor.tensorflow": [[32, "module-neural_compressor.adaptor.tensorflow", false]], "neural_compressor.adaptor.tf_utils": [[96, "module-neural_compressor.adaptor.tf_utils", false]], "neural_compressor.adaptor.tf_utils.graph_converter": [[33, "module-neural_compressor.adaptor.tf_utils.graph_converter", false]], "neural_compressor.adaptor.tf_utils.graph_converter_without_calib": [[34, "module-neural_compressor.adaptor.tf_utils.graph_converter_without_calib", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter": [[72, "module-neural_compressor.adaptor.tf_utils.graph_rewriter", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16": [[37, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert": [[35, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[36, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic": [[61, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd": [[38, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout": [[39, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu": [[40, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random": [[41, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const": [[42, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction": [[43, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd": [[44, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer": [[45, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape": [[46, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm": [[47, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant": [[48, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add": [[49, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul": [[50, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math": [[51, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn": [[52, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in": [[53, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu": [[54, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm": [[55, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv": [[56, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[57, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose": [[58, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer": [[59, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass": [[60, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node": [[62, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu": [[63, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize": [[64, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes": [[65, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm": [[66, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input": [[67, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input", false]], 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes": [[68, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes": [[69, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer": [[70, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base": [[71, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8": [[80, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant": [[73, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value": [[74, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib": [[75, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[76, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize": [[77, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[78, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize": [[79, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer": [[81, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter": [[82, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse": [[83, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert": [[84, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation": [[85, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx": [[86, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph": [[87, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node": [[88, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema": [[89, 
"module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils": [[90, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq": [[91, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern": [[92, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq": [[93, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq", false]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern": [[94, "module-neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern", false]], "neural_compressor.adaptor.tf_utils.graph_util": [[95, "module-neural_compressor.adaptor.tf_utils.graph_util", false]], "neural_compressor.adaptor.tf_utils.quantize_graph": [[97, "module-neural_compressor.adaptor.tf_utils.quantize_graph", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat": [[99, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize": [[98, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config": [[100, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper": [[101, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers": [[102, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer": [[103, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add": [[104, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base": [[105, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn": [[106, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper": [[107, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq": [[115, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn": [[108, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2": [[109, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv": [[110, 
"module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv": [[111, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in": [[112, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul": [[113, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling": [[114, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq": [[116, "module-neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base": [[117, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn": [[118, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2": [[119, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv": [[120, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu": [[121, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul": [[122, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul", false]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling": [[123, "module-neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling", false]], "neural_compressor.adaptor.tf_utils.quantize_graph_common": [[124, "module-neural_compressor.adaptor.tf_utils.quantize_graph_common", false]], "neural_compressor.adaptor.tf_utils.smooth_quant_calibration": [[125, "module-neural_compressor.adaptor.tf_utils.smooth_quant_calibration", false]], "neural_compressor.adaptor.tf_utils.smooth_quant_scaler": [[126, "module-neural_compressor.adaptor.tf_utils.smooth_quant_scaler", false]], "neural_compressor.adaptor.tf_utils.tf2onnx_converter": [[127, "module-neural_compressor.adaptor.tf_utils.tf2onnx_converter", false]], "neural_compressor.adaptor.tf_utils.transform_graph": [[130, "module-neural_compressor.adaptor.tf_utils.transform_graph", false]], "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction": [[128, "module-neural_compressor.adaptor.tf_utils.transform_graph.bias_correction", false]], "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base": [[129, "module-neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base", false]], "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging": [[131, "module-neural_compressor.adaptor.tf_utils.transform_graph.insert_logging", false]], "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat": [[132, "module-neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat", false]], "neural_compressor.adaptor.tf_utils.util": 
[[133, "module-neural_compressor.adaptor.tf_utils.util", false]], "neural_compressor.adaptor.torch_utils": [[136, "module-neural_compressor.adaptor.torch_utils", false]], "neural_compressor.adaptor.torch_utils.bf16_convert": [[134, "module-neural_compressor.adaptor.torch_utils.bf16_convert", false]], "neural_compressor.adaptor.torch_utils.hawq_metric": [[135, "module-neural_compressor.adaptor.torch_utils.hawq_metric", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant": [[137, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle": [[138, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize": [[139, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load": [[140, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load", false]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils": [[141, "module-neural_compressor.adaptor.torch_utils.layer_wise_quant.utils", false]], "neural_compressor.adaptor.torch_utils.model_wrapper": [[142, "module-neural_compressor.adaptor.torch_utils.model_wrapper", false]], "neural_compressor.adaptor.torch_utils.pattern_detector": [[143, "module-neural_compressor.adaptor.torch_utils.pattern_detector", false]], "neural_compressor.adaptor.torch_utils.symbolic_trace": [[144, "module-neural_compressor.adaptor.torch_utils.symbolic_trace", false]], "neural_compressor.adaptor.torch_utils.util": [[145, "module-neural_compressor.adaptor.torch_utils.util", false]], "neural_compressor.algorithm": [[148, "module-neural_compressor.algorithm", false]], "neural_compressor.algorithm.algorithm": [[146, "module-neural_compressor.algorithm.algorithm", false]], "neural_compressor.algorithm.fast_bias_correction": [[147, "module-neural_compressor.algorithm.fast_bias_correction", false]], "neural_compressor.algorithm.smooth_quant": [[149, "module-neural_compressor.algorithm.smooth_quant", false]], "neural_compressor.algorithm.weight_correction": [[150, "module-neural_compressor.algorithm.weight_correction", false]], "neural_compressor.benchmark": [[151, "module-neural_compressor.benchmark", false]], "neural_compressor.common": [[155, "module-neural_compressor.common", false]], "neural_compressor.common.base_config": [[152, "module-neural_compressor.common.base_config", false]], "neural_compressor.common.base_tuning": [[153, "module-neural_compressor.common.base_tuning", false]], "neural_compressor.common.benchmark": [[154, "module-neural_compressor.common.benchmark", false]], "neural_compressor.common.tuning_param": [[156, "module-neural_compressor.common.tuning_param", false]], "neural_compressor.common.utils": [[158, "module-neural_compressor.common.utils", false]], "neural_compressor.common.utils.constants": [[157, "module-neural_compressor.common.utils.constants", false]], "neural_compressor.common.utils.logger": [[159, "module-neural_compressor.common.utils.logger", false]], "neural_compressor.common.utils.save_load": [[160, "module-neural_compressor.common.utils.save_load", false]], "neural_compressor.common.utils.utility": [[161, "module-neural_compressor.common.utils.utility", false]], "neural_compressor.compression.callbacks": [[162, "module-neural_compressor.compression.callbacks", false]], "neural_compressor.compression.distillation": [[164, 
"module-neural_compressor.compression.distillation", false]], "neural_compressor.compression.distillation.criterions": [[163, "module-neural_compressor.compression.distillation.criterions", false]], "neural_compressor.compression.distillation.optimizers": [[165, "module-neural_compressor.compression.distillation.optimizers", false]], "neural_compressor.compression.distillation.utility": [[166, "module-neural_compressor.compression.distillation.utility", false]], "neural_compressor.compression.hpo": [[167, "module-neural_compressor.compression.hpo", false]], "neural_compressor.compression.hpo.sa_optimizer": [[168, "module-neural_compressor.compression.hpo.sa_optimizer", false]], "neural_compressor.compression.pruner": [[170, "module-neural_compressor.compression.pruner", false]], "neural_compressor.compression.pruner.criteria": [[169, "module-neural_compressor.compression.pruner.criteria", false]], "neural_compressor.compression.pruner.model_slim": [[172, "module-neural_compressor.compression.pruner.model_slim", false]], "neural_compressor.compression.pruner.model_slim.auto_slim": [[171, "module-neural_compressor.compression.pruner.model_slim.auto_slim", false]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer": [[173, "module-neural_compressor.compression.pruner.model_slim.pattern_analyzer", false]], "neural_compressor.compression.pruner.model_slim.weight_slim": [[174, "module-neural_compressor.compression.pruner.model_slim.weight_slim", false]], "neural_compressor.compression.pruner.patterns": [[176, "module-neural_compressor.compression.pruner.patterns", false]], "neural_compressor.compression.pruner.patterns.base": [[175, "module-neural_compressor.compression.pruner.patterns.base", false]], "neural_compressor.compression.pruner.patterns.mha": [[177, "module-neural_compressor.compression.pruner.patterns.mha", false]], "neural_compressor.compression.pruner.patterns.ninm": [[178, "module-neural_compressor.compression.pruner.patterns.ninm", false]], "neural_compressor.compression.pruner.patterns.nxm": [[179, "module-neural_compressor.compression.pruner.patterns.nxm", false]], "neural_compressor.compression.pruner.pruners": [[183, "module-neural_compressor.compression.pruner.pruners", false]], "neural_compressor.compression.pruner.pruners.base": [[180, "module-neural_compressor.compression.pruner.pruners.base", false]], "neural_compressor.compression.pruner.pruners.basic": [[181, "module-neural_compressor.compression.pruner.pruners.basic", false]], "neural_compressor.compression.pruner.pruners.block_mask": [[182, "module-neural_compressor.compression.pruner.pruners.block_mask", false]], "neural_compressor.compression.pruner.pruners.mha": [[184, "module-neural_compressor.compression.pruner.pruners.mha", false]], "neural_compressor.compression.pruner.pruners.pattern_lock": [[185, "module-neural_compressor.compression.pruner.pruners.pattern_lock", false]], "neural_compressor.compression.pruner.pruners.progressive": [[186, "module-neural_compressor.compression.pruner.pruners.progressive", false]], "neural_compressor.compression.pruner.pruners.retrain_free": [[187, "module-neural_compressor.compression.pruner.pruners.retrain_free", false]], "neural_compressor.compression.pruner.pruning": [[188, "module-neural_compressor.compression.pruner.pruning", false]], "neural_compressor.compression.pruner.regs": [[189, "module-neural_compressor.compression.pruner.regs", false]], "neural_compressor.compression.pruner.schedulers": [[190, 
"module-neural_compressor.compression.pruner.schedulers", false]], "neural_compressor.compression.pruner.tf_criteria": [[191, "module-neural_compressor.compression.pruner.tf_criteria", false]], "neural_compressor.compression.pruner.utils": [[192, "module-neural_compressor.compression.pruner.utils", false]], "neural_compressor.compression.pruner.wanda": [[193, "module-neural_compressor.compression.pruner.wanda", false]], "neural_compressor.compression.pruner.wanda.utils": [[194, "module-neural_compressor.compression.pruner.wanda.utils", false]], "neural_compressor.config": [[195, "module-neural_compressor.config", false]], "neural_compressor.contrib": [[196, "module-neural_compressor.contrib", false]], "neural_compressor.contrib.strategy": [[197, "module-neural_compressor.contrib.strategy", false]], "neural_compressor.contrib.strategy.sigopt": [[198, "module-neural_compressor.contrib.strategy.sigopt", false]], "neural_compressor.contrib.strategy.tpe": [[199, "module-neural_compressor.contrib.strategy.tpe", false]], "neural_compressor.data": [[220, "module-neural_compressor.data", false]], "neural_compressor.data.dataloaders.base_dataloader": [[200, "module-neural_compressor.data.dataloaders.base_dataloader", false]], "neural_compressor.data.dataloaders.dataloader": [[201, "module-neural_compressor.data.dataloaders.dataloader", false]], "neural_compressor.data.dataloaders.default_dataloader": [[202, "module-neural_compressor.data.dataloaders.default_dataloader", false]], "neural_compressor.data.dataloaders.fetcher": [[203, "module-neural_compressor.data.dataloaders.fetcher", false]], "neural_compressor.data.dataloaders.mxnet_dataloader": [[204, "module-neural_compressor.data.dataloaders.mxnet_dataloader", false]], "neural_compressor.data.dataloaders.onnxrt_dataloader": [[205, "module-neural_compressor.data.dataloaders.onnxrt_dataloader", false]], "neural_compressor.data.dataloaders.pytorch_dataloader": [[206, "module-neural_compressor.data.dataloaders.pytorch_dataloader", false]], "neural_compressor.data.dataloaders.sampler": [[207, "module-neural_compressor.data.dataloaders.sampler", false]], "neural_compressor.data.dataloaders.tensorflow_dataloader": [[208, "module-neural_compressor.data.dataloaders.tensorflow_dataloader", false]], "neural_compressor.data.datasets": [[215, "module-neural_compressor.data.datasets", false]], "neural_compressor.data.datasets.bert_dataset": [[209, "module-neural_compressor.data.datasets.bert_dataset", false]], "neural_compressor.data.datasets.coco_dataset": [[210, "module-neural_compressor.data.datasets.coco_dataset", false]], "neural_compressor.data.datasets.dataset": [[211, "module-neural_compressor.data.datasets.dataset", false]], "neural_compressor.data.datasets.dummy_dataset": [[212, "module-neural_compressor.data.datasets.dummy_dataset", false]], "neural_compressor.data.datasets.dummy_dataset_v2": [[213, "module-neural_compressor.data.datasets.dummy_dataset_v2", false]], "neural_compressor.data.datasets.imagenet_dataset": [[214, "module-neural_compressor.data.datasets.imagenet_dataset", false]], "neural_compressor.data.datasets.style_transfer_dataset": [[216, "module-neural_compressor.data.datasets.style_transfer_dataset", false]], "neural_compressor.data.filters": [[219, "module-neural_compressor.data.filters", false]], "neural_compressor.data.filters.coco_filter": [[217, "module-neural_compressor.data.filters.coco_filter", false]], "neural_compressor.data.filters.filter": [[218, "module-neural_compressor.data.filters.filter", false]], 
"neural_compressor.data.transforms": [[222, "module-neural_compressor.data.transforms", false]], "neural_compressor.data.transforms.imagenet_transform": [[221, "module-neural_compressor.data.transforms.imagenet_transform", false]], "neural_compressor.data.transforms.postprocess": [[223, "module-neural_compressor.data.transforms.postprocess", false]], "neural_compressor.data.transforms.tokenization": [[224, "module-neural_compressor.data.transforms.tokenization", false]], "neural_compressor.data.transforms.transform": [[225, "module-neural_compressor.data.transforms.transform", false]], "neural_compressor.metric": [[233, "module-neural_compressor.metric", false]], "neural_compressor.metric.bleu": [[227, "module-neural_compressor.metric.bleu", false]], "neural_compressor.metric.bleu_util": [[228, "module-neural_compressor.metric.bleu_util", false]], "neural_compressor.metric.coco_label_map": [[229, "module-neural_compressor.metric.coco_label_map", false]], "neural_compressor.metric.coco_tools": [[230, "module-neural_compressor.metric.coco_tools", false]], "neural_compressor.metric.evaluate_squad": [[231, "module-neural_compressor.metric.evaluate_squad", false]], "neural_compressor.metric.f1": [[232, "module-neural_compressor.metric.f1", false]], "neural_compressor.metric.metric": [[234, "module-neural_compressor.metric.metric", false]], "neural_compressor.mix_precision": [[235, "module-neural_compressor.mix_precision", false]], "neural_compressor.model": [[237, "module-neural_compressor.model", false]], "neural_compressor.model.base_model": [[236, "module-neural_compressor.model.base_model", false]], "neural_compressor.model.keras_model": [[238, "module-neural_compressor.model.keras_model", false]], "neural_compressor.model.model": [[239, "module-neural_compressor.model.model", false]], "neural_compressor.model.mxnet_model": [[240, "module-neural_compressor.model.mxnet_model", false]], "neural_compressor.model.nets_factory": [[241, "module-neural_compressor.model.nets_factory", false]], "neural_compressor.model.onnx_model": [[242, "module-neural_compressor.model.onnx_model", false]], "neural_compressor.model.tensorflow_model": [[243, "module-neural_compressor.model.tensorflow_model", false]], "neural_compressor.model.torch_model": [[244, "module-neural_compressor.model.torch_model", false]], "neural_compressor.objective": [[245, "module-neural_compressor.objective", false]], "neural_compressor.profiling": [[246, "module-neural_compressor.profiling", false]], "neural_compressor.profiling.parser.factory": [[247, "module-neural_compressor.profiling.parser.factory", false]], "neural_compressor.profiling.parser.onnx_parser.factory": [[248, "module-neural_compressor.profiling.parser.onnx_parser.factory", false]], "neural_compressor.profiling.parser.onnx_parser.parser": [[249, "module-neural_compressor.profiling.parser.onnx_parser.parser", false]], "neural_compressor.profiling.parser.parser": [[250, "module-neural_compressor.profiling.parser.parser", false]], "neural_compressor.profiling.parser.result": [[251, "module-neural_compressor.profiling.parser.result", false]], "neural_compressor.profiling.parser.tensorflow_parser.factory": [[252, "module-neural_compressor.profiling.parser.tensorflow_parser.factory", false]], "neural_compressor.profiling.parser.tensorflow_parser.parser": [[253, "module-neural_compressor.profiling.parser.tensorflow_parser.parser", false]], "neural_compressor.profiling.profiler.factory": [[254, "module-neural_compressor.profiling.profiler.factory", false]], 
"neural_compressor.profiling.profiler.onnxrt_profiler.factory": [[255, "module-neural_compressor.profiling.profiler.onnxrt_profiler.factory", false]], "neural_compressor.profiling.profiler.onnxrt_profiler.profiler": [[256, "module-neural_compressor.profiling.profiler.onnxrt_profiler.profiler", false]], "neural_compressor.profiling.profiler.onnxrt_profiler.utils": [[257, "module-neural_compressor.profiling.profiler.onnxrt_profiler.utils", false]], "neural_compressor.profiling.profiler.profiler": [[258, "module-neural_compressor.profiling.profiler.profiler", false]], "neural_compressor.profiling.profiler.tensorflow_profiler.factory": [[259, "module-neural_compressor.profiling.profiler.tensorflow_profiler.factory", false]], "neural_compressor.profiling.profiler.tensorflow_profiler.profiler": [[260, "module-neural_compressor.profiling.profiler.tensorflow_profiler.profiler", false]], "neural_compressor.profiling.profiler.tensorflow_profiler.utils": [[261, "module-neural_compressor.profiling.profiler.tensorflow_profiler.utils", false]], "neural_compressor.quantization": [[262, "module-neural_compressor.quantization", false]], "neural_compressor.strategy": [[270, "module-neural_compressor.strategy", false]], "neural_compressor.strategy.auto": [[263, "module-neural_compressor.strategy.auto", false]], "neural_compressor.strategy.auto_mixed_precision": [[264, "module-neural_compressor.strategy.auto_mixed_precision", false]], "neural_compressor.strategy.basic": [[265, "module-neural_compressor.strategy.basic", false]], "neural_compressor.strategy.bayesian": [[266, "module-neural_compressor.strategy.bayesian", false]], "neural_compressor.strategy.conservative": [[267, "module-neural_compressor.strategy.conservative", false]], "neural_compressor.strategy.exhaustive": [[268, "module-neural_compressor.strategy.exhaustive", false]], "neural_compressor.strategy.hawq_v2": [[269, "module-neural_compressor.strategy.hawq_v2", false]], "neural_compressor.strategy.mse": [[271, "module-neural_compressor.strategy.mse", false]], "neural_compressor.strategy.mse_v2": [[272, "module-neural_compressor.strategy.mse_v2", false]], "neural_compressor.strategy.random": [[273, "module-neural_compressor.strategy.random", false]], "neural_compressor.strategy.strategy": [[274, "module-neural_compressor.strategy.strategy", false]], "neural_compressor.strategy.utils": [[276, "module-neural_compressor.strategy.utils", false]], "neural_compressor.strategy.utils.constant": [[275, "module-neural_compressor.strategy.utils.constant", false]], "neural_compressor.strategy.utils.tuning_sampler": [[277, "module-neural_compressor.strategy.utils.tuning_sampler", false]], "neural_compressor.strategy.utils.tuning_space": [[278, "module-neural_compressor.strategy.utils.tuning_space", false]], "neural_compressor.strategy.utils.tuning_structs": [[279, "module-neural_compressor.strategy.utils.tuning_structs", false]], "neural_compressor.strategy.utils.utility": [[280, "module-neural_compressor.strategy.utils.utility", false]], "neural_compressor.template.api_doc_example": [[281, "module-neural_compressor.template.api_doc_example", false]], "neural_compressor.tensorflow": [[290, "module-neural_compressor.tensorflow", false]], "neural_compressor.tensorflow.algorithms": [[282, "module-neural_compressor.tensorflow.algorithms", false]], "neural_compressor.tensorflow.algorithms.smoother": [[285, "module-neural_compressor.tensorflow.algorithms.smoother", false]], "neural_compressor.tensorflow.algorithms.smoother.calibration": [[283, 
"module-neural_compressor.tensorflow.algorithms.smoother.calibration", false]], "neural_compressor.tensorflow.algorithms.smoother.core": [[284, "module-neural_compressor.tensorflow.algorithms.smoother.core", false]], "neural_compressor.tensorflow.algorithms.smoother.scaler": [[286, "module-neural_compressor.tensorflow.algorithms.smoother.scaler", false]], "neural_compressor.tensorflow.algorithms.static_quant": [[287, "module-neural_compressor.tensorflow.algorithms.static_quant", false]], "neural_compressor.tensorflow.algorithms.static_quant.keras": [[288, "module-neural_compressor.tensorflow.algorithms.static_quant.keras", false]], "neural_compressor.tensorflow.algorithms.static_quant.tensorflow": [[289, "module-neural_compressor.tensorflow.algorithms.static_quant.tensorflow", false]], "neural_compressor.tensorflow.keras": [[291, "module-neural_compressor.tensorflow.keras", false]], "neural_compressor.tensorflow.keras.layers": [[295, "module-neural_compressor.tensorflow.keras.layers", false]], "neural_compressor.tensorflow.keras.layers.conv2d": [[292, "module-neural_compressor.tensorflow.keras.layers.conv2d", false]], "neural_compressor.tensorflow.keras.layers.dense": [[293, "module-neural_compressor.tensorflow.keras.layers.dense", false]], "neural_compressor.tensorflow.keras.layers.depthwise_conv2d": [[294, "module-neural_compressor.tensorflow.keras.layers.depthwise_conv2d", false]], "neural_compressor.tensorflow.keras.layers.layer_initializer": [[296, "module-neural_compressor.tensorflow.keras.layers.layer_initializer", false]], "neural_compressor.tensorflow.keras.layers.pool2d": [[297, "module-neural_compressor.tensorflow.keras.layers.pool2d", false]], "neural_compressor.tensorflow.keras.layers.separable_conv2d": [[298, "module-neural_compressor.tensorflow.keras.layers.separable_conv2d", false]], "neural_compressor.tensorflow.keras.quantization": [[300, "module-neural_compressor.tensorflow.keras.quantization", false]], "neural_compressor.tensorflow.keras.quantization.config": [[299, "module-neural_compressor.tensorflow.keras.quantization.config", false]], "neural_compressor.tensorflow.quantization": [[304, "module-neural_compressor.tensorflow.quantization", false]], "neural_compressor.tensorflow.quantization.algorithm_entry": [[301, "module-neural_compressor.tensorflow.quantization.algorithm_entry", false]], "neural_compressor.tensorflow.quantization.autotune": [[302, "module-neural_compressor.tensorflow.quantization.autotune", false]], "neural_compressor.tensorflow.quantization.config": [[303, "module-neural_compressor.tensorflow.quantization.config", false]], "neural_compressor.tensorflow.quantization.quantize": [[305, "module-neural_compressor.tensorflow.quantization.quantize", false]], "neural_compressor.tensorflow.quantization.utils": [[361, "module-neural_compressor.tensorflow.quantization.utils", false]], "neural_compressor.tensorflow.quantization.utils.graph_converter": [[306, "module-neural_compressor.tensorflow.quantization.utils.graph_converter", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter": [[344, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16": [[309, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert": [[307, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert", false]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[308, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic": [[333, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd": [[310, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout": [[311, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu": [[312, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random": [[313, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const": [[314, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction": [[315, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd": [[316, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer": [[317, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape": [[318, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm": [[319, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant": [[320, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add": [[321, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul": [[322, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math": [[323, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn": [[324, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn", false]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in": [[325, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu": [[326, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm": [[327, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv": [[328, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[329, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose": [[330, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer": [[331, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass": [[332, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node": [[334, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu": [[335, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize": [[336, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes": [[337, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm": [[338, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input": [[339, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes": [[340, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes": [[341, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer": [[342, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer", false]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base": [[343, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8": [[351, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant": [[345, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value": [[346, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[347, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize": [[348, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[349, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize": [[350, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer": [[352, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter": [[353, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse": [[354, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation": [[355, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq": [[356, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern": [[357, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq": [[358, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq", false]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern": [[359, "module-neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern", false]], "neural_compressor.tensorflow.quantization.utils.graph_util": [[360, "module-neural_compressor.tensorflow.quantization.utils.graph_util", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph": [[362, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph", false]], 
"neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq": [[370, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn": [[363, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2": [[364, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv": [[365, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv": [[366, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in": [[367, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul": [[368, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling": [[369, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq": [[371, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base": [[372, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn": [[373, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2": [[374, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv": [[375, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu": [[376, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul": [[377, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling": [[378, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling", false]], "neural_compressor.tensorflow.quantization.utils.quantize_graph_common": [[379, "module-neural_compressor.tensorflow.quantization.utils.quantize_graph_common", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph": [[382, "module-neural_compressor.tensorflow.quantization.utils.transform_graph", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction": [[380, 
"module-neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base": [[381, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging": [[383, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging", false]], "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat": [[384, "module-neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat", false]], "neural_compressor.tensorflow.quantization.utils.utility": [[385, "module-neural_compressor.tensorflow.quantization.utils.utility", false]], "neural_compressor.tensorflow.utils": [[388, "module-neural_compressor.tensorflow.utils", false]], "neural_compressor.tensorflow.utils.constants": [[386, "module-neural_compressor.tensorflow.utils.constants", false]], "neural_compressor.tensorflow.utils.data": [[387, "module-neural_compressor.tensorflow.utils.data", false]], "neural_compressor.tensorflow.utils.model": [[389, "module-neural_compressor.tensorflow.utils.model", false]], "neural_compressor.tensorflow.utils.model_wrappers": [[390, "module-neural_compressor.tensorflow.utils.model_wrappers", false]], "neural_compressor.tensorflow.utils.utility": [[391, "module-neural_compressor.tensorflow.utils.utility", false]], "neural_compressor.torch": [[436, "module-neural_compressor.torch", false]], "neural_compressor.torch.algorithms": [[394, "module-neural_compressor.torch.algorithms", false]], "neural_compressor.torch.algorithms.base_algorithm": [[392, "module-neural_compressor.torch.algorithms.base_algorithm", false]], "neural_compressor.torch.algorithms.fp8_quant.utils.logger": [[393, "module-neural_compressor.torch.algorithms.fp8_quant.utils.logger", false]], "neural_compressor.torch.algorithms.layer_wise": [[395, "module-neural_compressor.torch.algorithms.layer_wise", false]], "neural_compressor.torch.algorithms.layer_wise.load": [[396, "module-neural_compressor.torch.algorithms.layer_wise.load", false]], "neural_compressor.torch.algorithms.layer_wise.modified_pickle": [[397, "module-neural_compressor.torch.algorithms.layer_wise.modified_pickle", false]], "neural_compressor.torch.algorithms.layer_wise.utils": [[398, "module-neural_compressor.torch.algorithms.layer_wise.utils", false]], "neural_compressor.torch.algorithms.mixed_precision": [[400, "module-neural_compressor.torch.algorithms.mixed_precision", false]], "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert": [[399, "module-neural_compressor.torch.algorithms.mixed_precision.half_precision_convert", false]], "neural_compressor.torch.algorithms.mixed_precision.module_wrappers": [[401, "module-neural_compressor.torch.algorithms.mixed_precision.module_wrappers", false]], "neural_compressor.torch.algorithms.mx_quant": [[402, "module-neural_compressor.torch.algorithms.mx_quant", false]], "neural_compressor.torch.algorithms.mx_quant.mx": [[403, "module-neural_compressor.torch.algorithms.mx_quant.mx", false]], "neural_compressor.torch.algorithms.mx_quant.utils": [[404, "module-neural_compressor.torch.algorithms.mx_quant.utils", false]], "neural_compressor.torch.algorithms.pt2e_quant": [[407, "module-neural_compressor.torch.algorithms.pt2e_quant", false]], "neural_compressor.torch.algorithms.pt2e_quant.core": [[405, 
"module-neural_compressor.torch.algorithms.pt2e_quant.core", false]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter": [[406, "module-neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter", false]], "neural_compressor.torch.algorithms.pt2e_quant.save_load": [[408, "module-neural_compressor.torch.algorithms.pt2e_quant.save_load", false]], "neural_compressor.torch.algorithms.pt2e_quant.utility": [[409, "module-neural_compressor.torch.algorithms.pt2e_quant.utility", false]], "neural_compressor.torch.algorithms.smooth_quant": [[410, "module-neural_compressor.torch.algorithms.smooth_quant", false]], "neural_compressor.torch.algorithms.smooth_quant.save_load": [[411, "module-neural_compressor.torch.algorithms.smooth_quant.save_load", false]], "neural_compressor.torch.algorithms.smooth_quant.smooth_quant": [[412, "module-neural_compressor.torch.algorithms.smooth_quant.smooth_quant", false]], "neural_compressor.torch.algorithms.smooth_quant.utility": [[413, "module-neural_compressor.torch.algorithms.smooth_quant.utility", false]], "neural_compressor.torch.algorithms.static_quant": [[414, "module-neural_compressor.torch.algorithms.static_quant", false]], "neural_compressor.torch.algorithms.static_quant.save_load": [[415, "module-neural_compressor.torch.algorithms.static_quant.save_load", false]], "neural_compressor.torch.algorithms.static_quant.static_quant": [[416, "module-neural_compressor.torch.algorithms.static_quant.static_quant", false]], "neural_compressor.torch.algorithms.static_quant.utility": [[417, "module-neural_compressor.torch.algorithms.static_quant.utility", false]], "neural_compressor.torch.algorithms.weight_only": [[428, "module-neural_compressor.torch.algorithms.weight_only", false]], "neural_compressor.torch.algorithms.weight_only.autoround": [[418, "module-neural_compressor.torch.algorithms.weight_only.autoround", false]], "neural_compressor.torch.algorithms.weight_only.awq": [[419, "module-neural_compressor.torch.algorithms.weight_only.awq", false]], "neural_compressor.torch.algorithms.weight_only.gptq": [[420, "module-neural_compressor.torch.algorithms.weight_only.gptq", false]], "neural_compressor.torch.algorithms.weight_only.hqq": [[424, "module-neural_compressor.torch.algorithms.weight_only.hqq", false]], "neural_compressor.torch.algorithms.weight_only.hqq.bitpack": [[421, "module-neural_compressor.torch.algorithms.weight_only.hqq.bitpack", false]], "neural_compressor.torch.algorithms.weight_only.hqq.config": [[422, "module-neural_compressor.torch.algorithms.weight_only.hqq.config", false]], "neural_compressor.torch.algorithms.weight_only.hqq.core": [[423, "module-neural_compressor.torch.algorithms.weight_only.hqq.core", false]], "neural_compressor.torch.algorithms.weight_only.hqq.optimizer": [[425, "module-neural_compressor.torch.algorithms.weight_only.hqq.optimizer", false]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor": [[426, "module-neural_compressor.torch.algorithms.weight_only.hqq.qtensor", false]], "neural_compressor.torch.algorithms.weight_only.hqq.quantizer": [[427, "module-neural_compressor.torch.algorithms.weight_only.hqq.quantizer", false]], "neural_compressor.torch.algorithms.weight_only.modules": [[429, "module-neural_compressor.torch.algorithms.weight_only.modules", false]], "neural_compressor.torch.algorithms.weight_only.rtn": [[430, "module-neural_compressor.torch.algorithms.weight_only.rtn", false]], "neural_compressor.torch.algorithms.weight_only.save_load": [[431, 
"module-neural_compressor.torch.algorithms.weight_only.save_load", false]], "neural_compressor.torch.algorithms.weight_only.teq": [[432, "module-neural_compressor.torch.algorithms.weight_only.teq", false]], "neural_compressor.torch.algorithms.weight_only.utility": [[433, "module-neural_compressor.torch.algorithms.weight_only.utility", false]], "neural_compressor.torch.export": [[434, "module-neural_compressor.torch.export", false]], "neural_compressor.torch.export.pt2e_export": [[435, "module-neural_compressor.torch.export.pt2e_export", false]], "neural_compressor.torch.quantization": [[440, "module-neural_compressor.torch.quantization", false]], "neural_compressor.torch.quantization.algorithm_entry": [[437, "module-neural_compressor.torch.quantization.algorithm_entry", false]], "neural_compressor.torch.quantization.autotune": [[438, "module-neural_compressor.torch.quantization.autotune", false]], "neural_compressor.torch.quantization.config": [[439, "module-neural_compressor.torch.quantization.config", false]], "neural_compressor.torch.quantization.load_entry": [[441, "module-neural_compressor.torch.quantization.load_entry", false]], "neural_compressor.torch.quantization.quantize": [[442, "module-neural_compressor.torch.quantization.quantize", false]], "neural_compressor.torch.utils": [[447, "module-neural_compressor.torch.utils", false]], "neural_compressor.torch.utils.auto_accelerator": [[443, "module-neural_compressor.torch.utils.auto_accelerator", false]], "neural_compressor.torch.utils.bit_packer": [[444, "module-neural_compressor.torch.utils.bit_packer", false]], "neural_compressor.torch.utils.constants": [[445, "module-neural_compressor.torch.utils.constants", false]], "neural_compressor.torch.utils.environ": [[446, "module-neural_compressor.torch.utils.environ", false]], "neural_compressor.torch.utils.utility": [[448, "module-neural_compressor.torch.utils.utility", false]], "neural_compressor.training": [[449, "module-neural_compressor.training", false]], "neural_compressor.transformers.quantization.utils": [[450, "module-neural_compressor.transformers.quantization.utils", false]], "neural_compressor.transformers.utils": [[451, "module-neural_compressor.transformers.utils", false]], "neural_compressor.transformers.utils.quantization_config": [[452, "module-neural_compressor.transformers.utils.quantization_config", false]], "neural_compressor.utils": [[460, "module-neural_compressor.utils", false]], "neural_compressor.utils.collect_layer_histogram": [[453, "module-neural_compressor.utils.collect_layer_histogram", false]], "neural_compressor.utils.constant": [[454, "module-neural_compressor.utils.constant", false]], "neural_compressor.utils.create_obj_from_config": [[455, "module-neural_compressor.utils.create_obj_from_config", false]], "neural_compressor.utils.export": [[456, "module-neural_compressor.utils.export", false]], "neural_compressor.utils.export.qlinear2qdq": [[457, "module-neural_compressor.utils.export.qlinear2qdq", false]], "neural_compressor.utils.export.tf2onnx": [[458, "module-neural_compressor.utils.export.tf2onnx", false]], "neural_compressor.utils.export.torch2onnx": [[459, "module-neural_compressor.utils.export.torch2onnx", false]], "neural_compressor.utils.kl_divergence": [[461, "module-neural_compressor.utils.kl_divergence", false]], "neural_compressor.utils.load_huggingface": [[462, "module-neural_compressor.utils.load_huggingface", false]], "neural_compressor.utils.logger": [[463, "module-neural_compressor.utils.logger", false]], 
"neural_compressor.utils.options": [[464, "module-neural_compressor.utils.options", false]], "neural_compressor.utils.pytorch": [[465, "module-neural_compressor.utils.pytorch", false]], "neural_compressor.utils.utility": [[466, "module-neural_compressor.utils.utility", false]], "neural_compressor.utils.weights_details": [[467, "module-neural_compressor.utils.weights_details", false]], "neural_compressor.version": [[468, "module-neural_compressor.version", false]], "node_collector (class in neural_compressor.adaptor.torch_utils.hawq_metric)": [[135, "neural_compressor.adaptor.torch_utils.hawq_metric.Node_collector", false]], "node_from_map() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.node_from_map", false]], "node_from_map() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.node_from_map", false]], "node_from_map() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.node_from_map", false]], "node_from_map() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.node_from_map", false]], "node_from_map() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.node_from_map", false]], "node_from_map() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.node_from_map", false]], "node_name_from_input() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.node_name_from_input", false]], "node_name_from_input() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.node_name_from_input", false]], "nondigit_punct_re (neural_compressor.metric.bleu.unicoderegex attribute)": [[227, 
"neural_compressor.metric.bleu.UnicodeRegex.nondigit_punct_re", false]], "normalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.norm)": [[20, "neural_compressor.adaptor.ox_utils.operators.norm.NormalizationOperator", false]], "normalize_answer() (in module neural_compressor.metric.f1)": [[232, "neural_compressor.metric.f1.normalize_answer", false]], "normalizetftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.NormalizeTFTransform", false]], "normalizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.NormalizeTransform", false]], "num_correct (neural_compressor.metric.metric.generaltopk attribute)": [[234, "neural_compressor.metric.metric.GeneralTopK.num_correct", false]], "num_correct (neural_compressor.metric.metric.tensorflowtopk attribute)": [[234, "neural_compressor.metric.metric.TensorflowTopK.num_correct", false]], "num_sample (neural_compressor.metric.metric.generaltopk attribute)": [[234, "neural_compressor.metric.metric.GeneralTopK.num_sample", false]], "num_sample (neural_compressor.metric.metric.tensorflowtopk attribute)": [[234, "neural_compressor.metric.metric.TensorflowTopK.num_sample", false]], "objective (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Objective", false]], "objective_custom_registry() (in module neural_compressor.objective)": [[245, "neural_compressor.objective.objective_custom_registry", false]], "objective_registry() (in module neural_compressor.objective)": [[245, "neural_compressor.objective.objective_registry", false]], "oneshotscheduler (class in neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.OneshotScheduler", false]], "onnx (class in neural_compressor.config)": [[195, "neural_compressor.config.ONNX", false]], "onnx_qlinear_to_qdq() (in module neural_compressor.utils.export.qlinear2qdq)": [[457, "neural_compressor.utils.export.qlinear2qdq.onnx_qlinear_to_qdq", false]], "onnxbilinearimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.OnnxBilinearImagenetTransform", false]], "onnxgraph (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph)": [[87, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph.OnnxGraph", false]], "onnxmodel (class in neural_compressor.model.onnx_model)": [[242, "neural_compressor.model.onnx_model.ONNXModel", false]], "onnxnode (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node)": [[88, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node.OnnxNode", false]], "onnxopschema (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema)": [[89, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema.OnnxOpSchema", false]], "onnxprofilingparser (class in neural_compressor.profiling.parser.onnx_parser.parser)": [[249, "neural_compressor.profiling.parser.onnx_parser.parser.OnnxProfilingParser", false]], "onnxqlinear2qdqconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.ONNXQlinear2QDQConfig", false]], "onnxresizecropimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ONNXResizeCropImagenetTransform", false]], "onnxrt (class in neural_compressor.utils.options)": [[464, 
"neural_compressor.utils.options.onnxrt", false]], "onnxrtaugment (class in neural_compressor.adaptor.ox_utils.calibration)": [[2, "neural_compressor.adaptor.ox_utils.calibration.ONNXRTAugment", false]], "onnxrtbertdataloader (class in neural_compressor.data.dataloaders.onnxrt_dataloader)": [[205, "neural_compressor.data.dataloaders.onnxrt_dataloader.ONNXRTBertDataLoader", false]], "onnxrtbertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.ONNXRTBertDataset", false]], "onnxrtcroptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ONNXRTCropToBoundingBox", false]], "onnxrtdataloader (class in neural_compressor.data.dataloaders.onnxrt_dataloader)": [[205, "neural_compressor.data.dataloaders.onnxrt_dataloader.ONNXRTDataLoader", false]], "onnxrtglue (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ONNXRTGLUE", false]], "onnxrtimagenetdataset (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.ONNXRTImagenetDataset", false]], "onnxrtitdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.ONNXRTITDatasets", false]], "onnxrtitfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.ONNXRTITFilters", false]], "onnxrtitmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ONNXRTITMetrics", false]], "onnxrtittransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ONNXRTITTransforms", false]], "onnxrtparserfactory (class in neural_compressor.profiling.parser.onnx_parser.factory)": [[248, "neural_compressor.profiling.parser.onnx_parser.factory.OnnxrtParserFactory", false]], "onnxrtqldatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.ONNXRTQLDatasets", false]], "onnxrtqlfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.ONNXRTQLFilters", false]], "onnxrtqlmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ONNXRTQLMetrics", false]], "onnxrtqltransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ONNXRTQLTransforms", false]], "op_level (neural_compressor.common.tuning_param.paramlevel attribute)": [[156, "neural_compressor.common.tuning_param.ParamLevel.OP_LEVEL", false]], "op_registry() (in module neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.op_registry", false]], "op_type_level (neural_compressor.common.tuning_param.paramlevel attribute)": [[156, "neural_compressor.common.tuning_param.ParamLevel.OP_TYPE_LEVEL", false]], "opentry (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.OpEntry", false]], "operator (class in neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.Operator", false]], "operatorconfig (class in neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.OperatorConfig", false]], "operatorconfig (class in neural_compressor.torch.quantization.config)": [[439, 
"neural_compressor.torch.quantization.config.OperatorConfig", false]], "optimize_weights_proximal_legacy() (in module neural_compressor.torch.algorithms.weight_only.hqq.optimizer)": [[425, "neural_compressor.torch.algorithms.weight_only.hqq.optimizer.optimize_weights_proximal_legacy", false]], "optimizedmodel (class in neural_compressor.utils.load_huggingface)": [[462, "neural_compressor.utils.load_huggingface.OptimizedModel", false]], "optimizeqdqgraph (class in neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq)": [[116, "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq.OptimizeQDQGraph", false]], "optimizeqdqgraph (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq)": [[371, "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq.OptimizeQDQGraph", false]], "optimizer_registry() (in module neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.optimizer_registry", false]], "optimizers (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.Optimizers", false]], "options (class in neural_compressor.config)": [[195, "neural_compressor.config.Options", false]], "optuningconfig (class in neural_compressor.strategy.utils.tuning_structs)": [[279, "neural_compressor.strategy.utils.tuning_structs.OpTuningConfig", false]], "optype (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.OpType", false]], "optypewisetuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.OpTypeWiseTuningSampler", false]], "opwisetuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.OpWiseTuningSampler", false]], "ordereddefaultdict (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.OrderedDefaultDict", false]], "ortsmoothquant (class in neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, "neural_compressor.adaptor.ox_utils.smooth_quant.ORTSmoothQuant", false]], "pack_array_with_numba_b2_c16() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c16", false]], "pack_array_with_numba_b2_c32() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c32", false]], "pack_array_with_numba_b2_c64() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c64", false]], "pack_array_with_numba_b2_c8() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b2_c8", false]], "pack_array_with_numba_b4_c16() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c16", false]], "pack_array_with_numba_b4_c32() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c32", false]], "pack_array_with_numba_b4_c64() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c64", false]], "pack_array_with_numba_b4_c8() (in module 
neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b4_c8", false]], "pack_array_with_numba_b8_c16() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c16", false]], "pack_array_with_numba_b8_c32() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c32", false]], "pack_array_with_numba_b8_c64() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c64", false]], "pack_array_with_numba_b8_c8() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.pack_array_with_numba_b8_c8", false]], "packer (class in neural_compressor.torch.algorithms.weight_only.hqq.bitpack)": [[421, "neural_compressor.torch.algorithms.weight_only.hqq.bitpack.Packer", false]], "packing (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.packing", false]], "pad_tensor() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.pad_tensor", false]], "paddedcentercroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PaddedCenterCropTransform", false]], "padoperator (class in neural_compressor.adaptor.ox_utils.operators.pad)": [[22, "neural_compressor.adaptor.ox_utils.operators.pad.PadOperator", false]], "paramlevel (class in neural_compressor.common.tuning_param)": [[156, "neural_compressor.common.tuning_param.ParamLevel", false]], "params_list (neural_compressor.common.base_config.baseconfig attribute)": [[152, "neural_compressor.common.base_config.BaseConfig.params_list", false]], "parse_auto_slim_config() (in module neural_compressor.compression.pruner.model_slim.auto_slim)": [[171, "neural_compressor.compression.pruner.model_slim.auto_slim.parse_auto_slim_config", false]], "parse_cfgs() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.parse_cfgs", false]], "parse_last_linear() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_last_linear", false]], "parse_last_linear_tf() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_last_linear_tf", false]], "parse_saved_model() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.parse_saved_model", false]], "parse_saved_model() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.parse_saved_model", false]], "parse_str2list() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.parse_str2list", false]], "parse_to_prune() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_to_prune", false]], "parse_to_prune_tf() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.parse_to_prune_tf", false]], "parse_tune_config() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, 
"neural_compressor.adaptor.mxnet_utils.util.parse_tune_config", false]], "parse_valid_pruner_types() (in module neural_compressor.compression.pruner.pruners)": [[183, "neural_compressor.compression.pruner.pruners.parse_valid_pruner_types", false]], "parsedecodebert (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.ParseDecodeBert", false]], "parsedecodecoco (class in neural_compressor.data.datasets.coco_dataset)": [[210, "neural_compressor.data.datasets.coco_dataset.ParseDecodeCoco", false]], "parsedecodeimagenet (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ParseDecodeImagenet", false]], "parsedecodeimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ParseDecodeImagenetTransform", false]], "parsedecodevoctransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ParseDecodeVocTransform", false]], "parserfactory (class in neural_compressor.profiling.parser.factory)": [[247, "neural_compressor.profiling.parser.factory.ParserFactory", false]], "paser_cfgs() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.paser_cfgs", false]], "patch_hqq_moduile() (in module neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.patch_hqq_moduile", false]], "pattern (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.pattern", false]], "pattern (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.pattern", false]], "pattern (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.pattern", false]], "pattern (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.pattern", false]], "pattern_factory() (in module 
neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.pattern_factory", false]], "pattern_to_internal() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.pattern_to_internal", false]], "pattern_to_path() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.pattern_to_path", false]], "patternmha (class in neural_compressor.compression.pruner.patterns.mha)": [[177, "neural_compressor.compression.pruner.patterns.mha.PatternMHA", false]], "patternpair (class in neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair", false]], "percentilecalibrator (class in neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.PercentileCalibrator", false]], "performance (class in neural_compressor.objective)": [[245, "neural_compressor.objective.Performance", false]], "pickleerror": [[138, "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle.PickleError", false], [397, "neural_compressor.torch.algorithms.layer_wise.modified_pickle.PickleError", false]], "picklingerror": [[138, "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle.PicklingError", false], [397, "neural_compressor.torch.algorithms.layer_wise.modified_pickle.PicklingError", false]], "pooloperator (class in neural_compressor.adaptor.ox_utils.operators.pooling)": [[23, "neural_compressor.adaptor.ox_utils.operators.pooling.PoolOperator", false]], "postcompressionutils (class in neural_compressor.compression.pruner.model_slim.weight_slim)": [[174, "neural_compressor.compression.pruner.model_slim.weight_slim.PostCompressionUtils", false]], "postcseoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse)": [[83, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse.PostCseOptimizer", false]], "postcseoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse)": [[354, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse.PostCseOptimizer", false]], "posthostconstconverter (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter)": [[82, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter.PostHostConstConverter", false]], "posthostconstconverter (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter)": [[353, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter.PostHostConstConverter", false]], "postprocess (class in neural_compressor.data.transforms.postprocess)": [[223, "neural_compressor.data.transforms.postprocess.Postprocess", false]], "postprocess_model() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.postprocess_model", false]], "posttrainingquantconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.PostTrainingQuantConfig", false]], "pred_list (neural_compressor.metric.metric.accuracy attribute)": [[234, "neural_compressor.metric.metric.Accuracy.pred_list", false]], "pred_list (neural_compressor.metric.metric.mae attribute)": [[234, 
"neural_compressor.metric.metric.MAE.pred_list", false]], "pred_list (neural_compressor.metric.metric.mse attribute)": [[234, "neural_compressor.metric.metric.MSE.pred_list", false]], "predictions (neural_compressor.metric.bleu.bleu attribute)": [[227, "neural_compressor.metric.bleu.BLEU.predictions", false]], "preoptimization (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize)": [[64, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize.PreOptimization", false]], "preoptimization (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize)": [[336, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize.PreOptimization", false]], "prepare() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.prepare", false]], "prepare_compression() (in module neural_compressor.training)": [[449, "neural_compressor.training.prepare_compression", false]], "prepare_dataloader() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.prepare_dataloader", false]], "prepare_inputs() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.prepare_inputs", false]], "prepare_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.prepare_model", false]], "prepare_model_data() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.prepare_model_data", false]], "prepare_pruning() (in module neural_compressor.compression.pruner)": [[170, "neural_compressor.compression.pruner.prepare_pruning", false]], "preprocess_user_cfg() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.preprocess_user_cfg", false]], "print_iterables() (in module neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.print_iterables", false]], "print_op_list() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.print_op_list", false]], "print_table() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.print_table", false]], "process_and_check_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_and_check_config", false]], "process_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_config", false]], "process_weight_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_weight_config", false]], "process_yaml_config() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.process_yaml_config", false]], "processortype (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.ProcessorType", false]], "profile() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.profile", false]], "profiler (class in neural_compressor.profiling.profiler.onnxrt_profiler.profiler)": [[256, "neural_compressor.profiling.profiler.onnxrt_profiler.profiler.Profiler", false]], "profiler (class in neural_compressor.profiling.profiler.profiler)": [[258, 
"neural_compressor.profiling.profiler.profiler.Profiler", false]], "profiler (class in neural_compressor.profiling.profiler.tensorflow_profiler.profiler)": [[260, "neural_compressor.profiling.profiler.tensorflow_profiler.profiler.Profiler", false]], "profilerfactory (class in neural_compressor.profiling.profiler.factory)": [[254, "neural_compressor.profiling.profiler.factory.ProfilerFactory", false]], "profilerfactory (class in neural_compressor.profiling.profiler.onnxrt_profiler.factory)": [[255, "neural_compressor.profiling.profiler.onnxrt_profiler.factory.ProfilerFactory", false]], "profilerfactory (class in neural_compressor.profiling.profiler.tensorflow_profiler.factory)": [[259, "neural_compressor.profiling.profiler.tensorflow_profiler.factory.ProfilerFactory", false]], "profilingparser (class in neural_compressor.profiling.parser.parser)": [[250, "neural_compressor.profiling.parser.parser.ProfilingParser", false]], "profilingresult (class in neural_compressor.profiling.parser.result)": [[251, "neural_compressor.profiling.parser.result.ProfilingResult", false]], "pruner_info (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.pruner_info", false]], "pruner_info (neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.pruner_info", false]], "pruner_info (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.pruner_info", false]], "pruners (neural_compressor.compression.pruner.pruning.basepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasePruning.pruners", false]], "pruners (neural_compressor.compression.pruner.pruning.basicpruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.BasicPruning.pruners", false]], "pruners (neural_compressor.compression.pruner.pruning.retrainfreepruning attribute)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning.pruners", false]], "pruning_frequency (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.pruning_frequency", false]], "pruning_frequency (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.pruning_frequency", false]], "pruning_frequency (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.pruning_frequency", false]], "pruningcallbacks (class in neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.PruningCallbacks", false]], "pruningcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.PruningCriterion", false]], "pruningcriterion (class in neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.PruningCriterion", false]], "pruningscheduler (class in neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.PruningScheduler", false]], "pt2e_dynamic_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.pt2e_dynamic_quant_entry", false]], 
"pt2e_static_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.pt2e_static_quant_entry", false]], "punct_nondigit_re (neural_compressor.metric.bleu.unicoderegex attribute)": [[227, "neural_compressor.metric.bleu.UnicodeRegex.punct_nondigit_re", false]], "pythonmultiheadattentionpruner (class in neural_compressor.compression.pruner.pruners.mha)": [[184, "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner", false]], "pytorch (class in neural_compressor.config)": [[195, "neural_compressor.config.PyTorch", false]], "pytorchalignimagechannel (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchAlignImageChannel", false]], "pytorchbasemodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.PyTorchBaseModel", false]], "pytorchbasepattern (class in neural_compressor.compression.pruner.patterns.base)": [[175, "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern", false]], "pytorchbasepruner (class in neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner", false]], "pytorchbasicpruner (class in neural_compressor.compression.pruner.pruners.basic)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner", false]], "pytorchbertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.PytorchBertDataset", false]], "pytorchblockmaskpruner (class in neural_compressor.compression.pruner.pruners.block_mask)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner", false]], "pytorchcifar10 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchCIFAR10", false]], "pytorchcifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchCIFAR100", false]], "pytorchcriterions (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchCriterions", false]], "pytorchcropresizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchCropResizeTransform", false]], "pytorchcrossentropyloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchCrossEntropyLoss", false]], "pytorchdataloader (class in neural_compressor.data.dataloaders.pytorch_dataloader)": [[206, "neural_compressor.data.dataloaders.pytorch_dataloader.PyTorchDataLoader", false]], "pytorchdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PyTorchDatasets", false]], "pytorchfashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchFashionMNIST", false]], "pytorchfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.PyTorchFilters", false]], "pytorchfxmodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.PyTorchFXModel", false]], "pytorchimagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.PytorchImagenetRaw", 
false]], "pytorchintermediatelayersknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchIntermediateLayersKnowledgeDistillationLoss", false]], "pytorchintermediatelayersknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchIntermediateLayersKnowledgeDistillationLossWrapper", false]], "pytorchknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchKnowledgeDistillationLoss", false]], "pytorchknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchKnowledgeDistillationLossWrapper", false]], "pytorchloss (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.PyTorchLoss", false]], "pytorchmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.PyTorchMetrics", false]], "pytorchmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchMNIST", false]], "pytorchmodel (class in neural_compressor.model.torch_model)": [[244, "neural_compressor.model.torch_model.PyTorchModel", false]], "pytorchmxnettransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PytorchMxnetTransform", false]], "pytorchmxnetwrapdataset (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchMxnetWrapDataset", false]], "pytorchmxnetwrapfunction (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.PytorchMxnetWrapFunction", false]], "pytorchmxnetwrapfunction (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PytorchMxnetWrapFunction", false]], "pytorchnormalizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchNormalizeTransform", false]], "pytorchoptimizers (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.PyTorchOptimizers", false]], "pytorchpatternlockpruner (class in neural_compressor.compression.pruner.pruners.pattern_lock)": [[185, "neural_compressor.compression.pruner.pruners.pattern_lock.PytorchPatternLockPruner", false]], "pytorchpatternninm (class in neural_compressor.compression.pruner.patterns.ninm)": [[178, "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM", false]], "pytorchpatternnxm (class in neural_compressor.compression.pruner.patterns.nxm)": [[179, "neural_compressor.compression.pruner.patterns.nxm.PytorchPatternNxM", false]], "pytorchprogressivepruner (class in neural_compressor.compression.pruner.pruners.progressive)": [[186, "neural_compressor.compression.pruner.pruners.progressive.PytorchProgressivePruner", false]], "pytorchretrainfreepruner (class in neural_compressor.compression.pruner.pruners.retrain_free)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner", false]], "pytorchselfknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, 
"neural_compressor.compression.distillation.criterions.PyTorchSelfKnowledgeDistillationLoss", false]], "pytorchselfknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.PyTorchSelfKnowledgeDistillationLossWrapper", false]], "pytorchsgd (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.PyTorchSGD", false]], "pytorchtransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchTransforms", false]], "pytorchtranspose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.PyTorchTranspose", false]], "qactivationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.QActivationOperator", false]], "qargmaxoperator (class in neural_compressor.adaptor.ox_utils.operators.argmax)": [[6, "neural_compressor.adaptor.ox_utils.operators.argmax.QArgMaxOperator", false]], "qat_clone_function() (in module neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper)": [[101, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper.qat_clone_function", false]], "qattentionoperator (class in neural_compressor.adaptor.ox_utils.operators.attention)": [[7, "neural_compressor.adaptor.ox_utils.operators.attention.QAttentionOperator", false]], "qavgpool2d (class in neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.QAvgPool2D", false]], "qbinaryoperator (class in neural_compressor.adaptor.ox_utils.operators.binary_op)": [[8, "neural_compressor.adaptor.ox_utils.operators.binary_op.QBinaryOperator", false]], "qconcatoperator (class in neural_compressor.adaptor.ox_utils.operators.concat)": [[9, "neural_compressor.adaptor.ox_utils.operators.concat.QConcatOperator", false]], "qconv2d (class in neural_compressor.tensorflow.keras.layers.conv2d)": [[292, "neural_compressor.tensorflow.keras.layers.conv2d.QConv2D", false]], "qconvoperator (class in neural_compressor.adaptor.ox_utils.operators.conv)": [[10, "neural_compressor.adaptor.ox_utils.operators.conv.QConvOperator", false]], "qdense (class in neural_compressor.tensorflow.keras.layers.dense)": [[293, "neural_compressor.tensorflow.keras.layers.dense.QDense", false]], "qdepthwiseconv2d (class in neural_compressor.tensorflow.keras.layers.depthwise_conv2d)": [[294, "neural_compressor.tensorflow.keras.layers.depthwise_conv2d.QDepthwiseConv2D", false]], "qdirectoperator (class in neural_compressor.adaptor.ox_utils.operators.direct_q8)": [[11, "neural_compressor.adaptor.ox_utils.operators.direct_q8.QDirectOperator", false]], "qdq_quantize() (in module neural_compressor.torch.algorithms.smooth_quant.smooth_quant)": [[412, "neural_compressor.torch.algorithms.smooth_quant.smooth_quant.qdq_quantize", false]], "qdq_tensor() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.qdq_tensor", false]], "qdq_weight_actor() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.qdq_weight_actor", false]], "qdq_weight_asym() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.qdq_weight_asym", false]], 
"qdq_weight_sym() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.qdq_weight_sym", false]], "qdqlayer (class in neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.QDQLayer", false]], "qdqlayer (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.QDQLayer", false]], "qembedlayernormalizationoperator (class in neural_compressor.adaptor.ox_utils.operators.embed_layernorm)": [[12, "neural_compressor.adaptor.ox_utils.operators.embed_layernorm.QEmbedLayerNormalizationOperator", false]], "qgatheroperator (class in neural_compressor.adaptor.ox_utils.operators.gather)": [[13, "neural_compressor.adaptor.ox_utils.operators.gather.QGatherOperator", false]], "qgemmoperator (class in neural_compressor.adaptor.ox_utils.operators.gemm)": [[15, "neural_compressor.adaptor.ox_utils.operators.gemm.QGemmOperator", false]], "qglobalaveragepooloperator (class in neural_compressor.adaptor.ox_utils.operators.gavgpool)": [[14, "neural_compressor.adaptor.ox_utils.operators.gavgpool.QGlobalAveragePoolOperator", false]], "qmatmuloperator (class in neural_compressor.adaptor.ox_utils.operators.matmul)": [[18, "neural_compressor.adaptor.ox_utils.operators.matmul.QMatMulOperator", false]], "qmaxpool2d (class in neural_compressor.tensorflow.keras.layers.pool2d)": [[297, "neural_compressor.tensorflow.keras.layers.pool2d.QMaxPool2D", false]], "qmaxpooloperator (class in neural_compressor.adaptor.ox_utils.operators.maxpool)": [[19, "neural_compressor.adaptor.ox_utils.operators.maxpool.QMaxPoolOperator", false]], "qop_registry() (in module neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.qop_registry", false]], "qoperator (class in neural_compressor.adaptor.ox_utils.operators.ops)": [[21, "neural_compressor.adaptor.ox_utils.operators.ops.QOperator", false]], "qpadoperator (class in neural_compressor.adaptor.ox_utils.operators.pad)": [[22, "neural_compressor.adaptor.ox_utils.operators.pad.QPadOperator", false]], "qpooloperator (class in neural_compressor.adaptor.ox_utils.operators.pooling)": [[23, "neural_compressor.adaptor.ox_utils.operators.pooling.QPoolOperator", false]], "qresizeoperator (class in neural_compressor.adaptor.ox_utils.operators.resize)": [[25, "neural_compressor.adaptor.ox_utils.operators.resize.QResizeOperator", false]], "qseparableconv2d (class in neural_compressor.tensorflow.keras.layers.separable_conv2d)": [[298, "neural_compressor.tensorflow.keras.layers.separable_conv2d.QSeparableConv2D", false]], "qsplitoperator (class in neural_compressor.adaptor.ox_utils.operators.split)": [[26, "neural_compressor.adaptor.ox_utils.operators.split.QSplitOperator", false]], "qtensor (class in neural_compressor.torch.algorithms.weight_only.hqq.qtensor)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensor", false]], "qtensorconfig (class in neural_compressor.torch.algorithms.weight_only.hqq.config)": [[422, "neural_compressor.torch.algorithms.weight_only.hqq.config.QTensorConfig", false]], "qtensormetainfo (class in neural_compressor.torch.algorithms.weight_only.hqq.qtensor)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo", false]], "quant_dequant_data() (in module neural_compressor.adaptor.ox_utils.smooth_quant)": [[29, 
"neural_compressor.adaptor.ox_utils.smooth_quant.quant_dequant_data", false]], "quant_dequant_w_v1() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.quant_dequant_w_v1", false]], "quant_dequant_x_v1() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.quant_dequant_x_v1", false]], "quant_mode_from_pattern() (in module neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.quant_mode_from_pattern", false]], "quant_tensor() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.quant_tensor", false]], "quant_tensor() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.quant_tensor", false]], "quant_weight_w_scale() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.quant_weight_w_scale", false]], "quantformat (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantFormat", false]], "quantizationawaretrainingcallbacks (class in neural_compressor.compression.callbacks)": [[162, "neural_compressor.compression.callbacks.QuantizationAwareTrainingCallbacks", false]], "quantizationawaretrainingconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.QuantizationAwareTrainingConfig", false]], "quantizationmethod (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.QuantizationMethod", false]], "quantizationmode (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizationMode", false]], "quantize() (in module neural_compressor.torch.quantization.quantize)": [[442, "neural_compressor.torch.quantization.quantize.quantize", false]], "quantize_4bit() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.quantize_4bit", false]], "quantize_data() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.quantize_data", false]], "quantize_data_per_channel() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.quantize_data_per_channel", false]], "quantize_data_with_scale_zero() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.quantize_data_with_scale_zero", false]], "quantize_elemwise_op() (in module neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.quantize_elemwise_op", false]], "quantize_model() (in module neural_compressor.tensorflow.quantization.quantize)": [[305, "neural_compressor.tensorflow.quantization.quantize.quantize_model", false]], "quantize_model_with_single_config() (in module neural_compressor.tensorflow.quantization.quantize)": [[305, "neural_compressor.tensorflow.quantization.quantize.quantize_model_with_single_config", false]], "quantize_mx_op() (in module neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.quantize_mx_op", false]], "quantize_nparray() (in module neural_compressor.adaptor.ox_utils.util)": [[30, 
"neural_compressor.adaptor.ox_utils.util.quantize_nparray", false]], "quantize_sym_model() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.quantize_sym_model", false]], "quantizeconfig (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config)": [[100, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config.QuantizeConfig", false]], "quantizedinitializer (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizedInitializer", false]], "quantizedinput (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.QuantizedInput", false]], "quantizedrnnconverter (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert)": [[84, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert.QuantizedRNNConverter", false]], "quantizedvalue (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizedValue", false]], "quantizedvaluetype (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantizedValueType", false]], "quantizegraphbase (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base)": [[117, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base.QuantizeGraphBase", false]], "quantizegraphbase (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base)": [[372, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base.QuantizeGraphBase", false]], "quantizegraphforintel (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu)": [[121, "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu.QuantizeGraphForIntel", false]], "quantizegraphforintel (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu)": [[376, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu.QuantizeGraphForIntel", false]], "quantizegraphhelper (class in neural_compressor.adaptor.tf_utils.quantize_graph_common)": [[124, "neural_compressor.adaptor.tf_utils.quantize_graph_common.QuantizeGraphHelper", false]], "quantizegraphhelper (class in neural_compressor.tensorflow.quantization.utils.quantize_graph_common)": [[379, "neural_compressor.tensorflow.quantization.utils.quantize_graph_common.QuantizeGraphHelper", false]], "quantizelayeradd (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add)": [[104, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add.QuantizeLayerAdd", false]], "quantizelayerbase (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base)": [[105, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base.QuantizeLayerBase", false]], "quantizelayerbatchnormalization (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn)": [[106, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn.QuantizeLayerBatchNormalization", false]], "quantizenodebase (class in neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base)": [[117, 
"neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base.QuantizeNodeBase", false]], "quantizenodebase (class in neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base)": [[372, "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base.QuantizeNodeBase", false]], "quantizer (class in neural_compressor.adaptor.ox_utils.quantizer)": [[28, "neural_compressor.adaptor.ox_utils.quantizer.Quantizer", false]], "quantizer (class in neural_compressor.torch.algorithms.base_algorithm)": [[392, "neural_compressor.torch.algorithms.base_algorithm.Quantizer", false]], "quantizer (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.Quantizer", false]], "quantizewrapper (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper)": [[107, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper.QuantizeWrapper", false]], "quantizewrapperbase (class in neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper)": [[107, "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper.QuantizeWrapperBase", false]], "quantoptions (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.QuantOptions", false]], "quanttype (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.QuantType", false]], "quanttype (class in neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.QuantType", false]], "query_quantizable_nodes() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.query_quantizable_nodes", false]], "randomcroptftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomCropTFTransform", false]], "randomcroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomCropTransform", false]], "randomhorizontalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomHorizontalFlip", false]], "randomresizedcropmxnettransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropMXNetTransform", false]], "randomresizedcroppytorchtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropPytorchTransform", false]], "randomresizedcroptftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropTFTransform", false]], "randomresizedcroptransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomResizedCropTransform", false]], "randomtunestrategy (class in neural_compressor.strategy.random)": [[273, "neural_compressor.strategy.random.RandomTuneStrategy", false]], "randomverticalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RandomVerticalFlip", false]], "rawgptquantizer (class in neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.RAWGPTQuantizer", false]], "read_graph() (in module neural_compressor.adaptor.tf_utils.util)": [[133, 
"neural_compressor.adaptor.tf_utils.util.read_graph", false]], "read_graph() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.read_graph", false]], "read_squad_examples() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.read_squad_examples", false]], "read_tensorflow_node_attrs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.read_tensorflow_node_attrs", false]], "recipe (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.recipe", false]], "recipesearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher", false]], "reconstruct_saved_model() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.reconstruct_saved_model", false]], "reconstruct_saved_model() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.reconstruct_saved_model", false]], "record_output() (in module neural_compressor.compression.distillation.utility)": [[166, "neural_compressor.compression.distillation.utility.record_output", false]], "recover() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.recover", false]], "recover_forward() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.recover_forward", false]], "recover_model_from_json() (in module neural_compressor.torch.algorithms.smooth_quant.save_load)": [[411, "neural_compressor.torch.algorithms.smooth_quant.save_load.recover_model_from_json", false]], "recover_model_from_json() (in module neural_compressor.utils.pytorch)": [[465, "neural_compressor.utils.pytorch.recover_model_from_json", false]], "reduceminmaxoperator (class in neural_compressor.adaptor.ox_utils.operators.reduce)": [[24, "neural_compressor.adaptor.ox_utils.operators.reduce.ReduceMinMaxOperator", false]], "reduceoperator (class in neural_compressor.adaptor.ox_utils.operators.reduce)": [[24, "neural_compressor.adaptor.ox_utils.operators.reduce.ReduceOperator", false]], "reg (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.reg", false]], "reg (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.reg", false]], "reg (neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.reg", false]], "reg (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.reg", false]], "reg_terms (neural_compressor.compression.pruner.regs.grouplasso attribute)": [[189, "neural_compressor.compression.pruner.regs.GroupLasso.reg_terms", false]], "register_accelerator() (in module neural_compressor.torch.utils.auto_accelerator)": 
[[443, "neural_compressor.torch.utils.auto_accelerator.register_accelerator", false]], "register_algo() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.register_algo", false]], "register_algo() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.register_algo", false]], "register_autotune() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.register_autotune", false]], "register_config() (in module neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.register_config", false]], "register_criterion() (in module neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.register_criterion", false]], "register_criterion() (in module neural_compressor.compression.pruner.tf_criteria)": [[191, "neural_compressor.compression.pruner.tf_criteria.register_criterion", false]], "register_customer_metric() (in module neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.register_customer_metric", false]], "register_pack_func() (in module neural_compressor.torch.utils.bit_packer)": [[444, "neural_compressor.torch.utils.bit_packer.register_pack_func", false]], "register_pattern() (in module neural_compressor.compression.pruner.patterns.base)": [[175, "neural_compressor.compression.pruner.patterns.base.register_pattern", false]], "register_pruner() (in module neural_compressor.compression.pruner.pruners.base)": [[180, "neural_compressor.compression.pruner.pruners.base.register_pruner", false]], "register_pruning() (in module neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.register_pruning", false]], "register_reg() (in module neural_compressor.compression.pruner.regs)": [[189, "neural_compressor.compression.pruner.regs.register_reg", false]], "register_scheduler() (in module neural_compressor.compression.pruner.schedulers)": [[190, "neural_compressor.compression.pruner.schedulers.register_scheduler", false]], "register_supported_configs_for_fwk() (in module neural_compressor.common.base_config)": [[152, "neural_compressor.common.base_config.register_supported_configs_for_fwk", false]], "register_weight_hooks() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.register_weight_hooks", false]], "removableactivationoperator (class in neural_compressor.adaptor.ox_utils.operators.activation)": [[5, "neural_compressor.adaptor.ox_utils.operators.activation.RemovableActivationOperator", false]], "remove_init_from_model_input() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.remove_init_from_model_input", false]], "removetrainingnodesoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes)": [[65, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes.RemoveTrainingNodesOptimizer", false]], "removetrainingnodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes)": [[337, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes.RemoveTrainingNodesOptimizer", false]], "renamebatchnormoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm)": [[66, 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm.RenameBatchNormOptimizer", false]], "renamebatchnormoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm)": [[338, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm.RenameBatchNormOptimizer", false]], "replace_forward() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.replace_forward", false]], "replace_pattern (neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.patternpair attribute)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair.replace_pattern", false]], "replacement_fn() (in module neural_compressor.torch.algorithms.weight_only.hqq.quantizer)": [[427, "neural_compressor.torch.algorithms.weight_only.hqq.quantizer.replacement_fn", false]], "rerangequantizedconcat (class in neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat)": [[132, "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat.RerangeQuantizedConcat", false]], "rerangequantizedconcat (class in neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat)": [[384, "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat.RerangeQuantizedConcat", false]], "rescalekeraspretraintransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RescaleKerasPretrainTransform", false]], "rescaletftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RescaleTFTransform", false]], "rescaletransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.RescaleTransform", false]], "reset_none_to_default() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.reset_none_to_default", false]], "reshape_in_channel_to_last() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.reshape_in_channel_to_last", false]], "reshape_scale_as_input() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.reshape_scale_as_input", false]], "reshape_scale_as_weight() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.reshape_scale_as_weight", false]], "resizemxnettransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeMXNetTransform", false]], "resizeoperator (class in neural_compressor.adaptor.ox_utils.operators.resize)": [[25, "neural_compressor.adaptor.ox_utils.operators.resize.ResizeOperator", false]], "resizepytorchtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizePytorchTransform", false]], "resizetftransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeTFTransform", false]], "resizetransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeTransform", false]], "resizewithaspectratio (class in 
neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.ResizeWithAspectRatio", false]], "resizewithratio (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ResizeWithRatio", false]], "retrainfreecriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.RetrainFreeCriterion", false]], "retrainfreepruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.RetrainFreePruning", false]], "reverted_data_type() (in module neural_compressor.strategy.utils.utility)": [[280, "neural_compressor.strategy.utils.utility.reverted_data_type", false]], "rmse (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.RMSE", false]], "roc (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.ROC", false]], "roundingmode (class in neural_compressor.torch.algorithms.mx_quant.utils)": [[404, "neural_compressor.torch.algorithms.mx_quant.utils.RoundingMode", false]], "rtn_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.rtn_entry", false]], "rtn_quantize() (in module neural_compressor.adaptor.ox_utils.weight_only)": [[31, "neural_compressor.adaptor.ox_utils.weight_only.rtn_quantize", false]], "rtnconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.RTNConfig", false]], "rtnconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.RtnConfig", false]], "rtnquantizer (class in neural_compressor.torch.algorithms.weight_only.rtn)": [[430, "neural_compressor.torch.algorithms.weight_only.rtn.RTNQuantizer", false]], "run_fn_for_vlm_autoround() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.run_fn_for_vlm_autoround", false]], "run_forward() (in module neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.run_forward", false]], "run_instance() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.run_instance", false]], "run_multi_instance_command() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.run_multi_instance_command", false]], "sample (neural_compressor.metric.metric.accuracy attribute)": [[234, "neural_compressor.metric.metric.Accuracy.sample", false]], "sample (neural_compressor.metric.metric.loss attribute)": [[234, "neural_compressor.metric.metric.Loss.sample", false]], "sampler (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.Sampler", false]], "sampler (class in neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.Sampler", false]], "save() (in module neural_compressor.compression.pruner)": [[170, "neural_compressor.compression.pruner.save", false]], "save() (in module neural_compressor.torch.algorithms.pt2e_quant.save_load)": [[408, "neural_compressor.torch.algorithms.pt2e_quant.save_load.save", false]], "save() (in module neural_compressor.torch.algorithms.static_quant.save_load)": [[415, "neural_compressor.torch.algorithms.static_quant.save_load.save", false]], "save() (in module 
neural_compressor.torch.algorithms.weight_only.save_load)": [[431, "neural_compressor.torch.algorithms.weight_only.save_load.save", false]], "save_config_mapping() (in module neural_compressor.common.utils.save_load)": [[160, "neural_compressor.common.utils.save_load.save_config_mapping", false]], "save_for_huggingface_upstream() (in module neural_compressor.utils.load_huggingface)": [[462, "neural_compressor.utils.load_huggingface.save_for_huggingface_upstream", false]], "save_protobuf() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.save_protobuf", false]], "saved_model_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.saved_model_session", false]], "saved_model_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.saved_model_session", false]], "scalepropagationtransformer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation)": [[85, "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation.ScaleProPagationTransformer", false]], "scalepropagationtransformer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation)": [[355, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation.ScaleProPagationTransformer", false]], "scheduler (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.basic.kerasbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.basic.pytorchbasicpruner attribute)": [[181, "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.block_mask.pytorchblockmaskpruner attribute)": [[182, "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner.scheduler", false]], "scheduler (neural_compressor.compression.pruner.pruners.retrain_free.pytorchretrainfreepruner attribute)": [[187, "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner.scheduler", false]], "scores (neural_compressor.compression.pruner.criteria.blockmaskcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.BlockMaskCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.gradientcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.GradientCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.magnitudecriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.MagnitudeCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.pruningcriterion attribute)": [[169, 
"neural_compressor.compression.pruner.criteria.PruningCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.retrainfreecriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.RetrainFreeCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.snipcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.SnipCriterion.scores", false]], "scores (neural_compressor.compression.pruner.criteria.snipmomentumcriterion attribute)": [[169, "neural_compressor.compression.pruner.criteria.SnipMomentumCriterion.scores", false]], "scores (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.scores", false]], "scores (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.scores", false]], "scores (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.scores", false]], "scores (neural_compressor.compression.pruner.tf_criteria.magnitudecriterion attribute)": [[191, "neural_compressor.compression.pruner.tf_criteria.MagnitudeCriterion.scores", false]], "scores (neural_compressor.compression.pruner.tf_criteria.pruningcriterion attribute)": [[191, "neural_compressor.compression.pruner.tf_criteria.PruningCriterion.scores", false]], "search_clip() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.search_clip", false]], "search_pattern (neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.patternpair attribute)": [[406, "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair.search_pattern", false]], "searching_results (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.searching_results", false]], "searching_results (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.searching_results", false]], "searching_results (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.searching_results", false]], "selfknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.SelfKnowledgeDistillationLoss", false]], "selfknowledgedistillationlossconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.SelfKnowledgeDistillationLossConfig", false]], "selfmhasearcher (class in neural_compressor.compression.pruner.model_slim.pattern_analyzer)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher", false]], "seqtype (class in neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.SeqType", false]], "sequentialsampler (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.SequentialSampler", false]], "sequentialsampler (class in 
neural_compressor.data.dataloaders.sampler)": [[207, "neural_compressor.data.dataloaders.sampler.SequentialSampler", false]], "sequentialsampler (class in neural_compressor.tensorflow.utils.data)": [[387, "neural_compressor.tensorflow.utils.data.SequentialSampler", false]], "set_all_env_var() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.set_all_env_var", false]], "set_cores_for_instance() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.set_cores_for_instance", false]], "set_eager_execution() (in module neural_compressor.profiling.profiler.tensorflow_profiler.utils)": [[261, "neural_compressor.profiling.profiler.tensorflow_profiler.utils.set_eager_execution", false]], "set_env_var() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.set_env_var", false]], "set_module() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.set_module", false]], "set_module() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.set_module", false]], "set_module() (in module neural_compressor.torch.algorithms.weight_only.utility)": [[433, "neural_compressor.torch.algorithms.weight_only.utility.set_module", false]], "set_module() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.set_module", false]], "set_name() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils)": [[90, "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils.set_name", false]], "set_random_seed() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_random_seed", false]], "set_random_seed() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_random_seed", false]], "set_resume_from() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_resume_from", false]], "set_resume_from() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_resume_from", false]], "set_tensorboard() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_tensorboard", false]], "set_tensorboard() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_tensorboard", false]], "set_workspace() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.set_workspace", false]], "set_workspace() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.set_workspace", false]], "shape (neural_compressor.torch.algorithms.weight_only.hqq.qtensor.qtensormetainfo attribute)": [[426, "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo.shape", false]], "shareqdqforitexypatternoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern)": [[94, "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern.ShareQDQForItexYPatternOptimizer", false]], "shareqdqforitexypatternoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern)": [[359, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern.ShareQDQForItexYPatternOptimizer", false]], "show_memory_info() (in module 
neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.show_memory_info", false]], "sigopttunestrategy (class in neural_compressor.contrib.strategy.sigopt)": [[198, "neural_compressor.contrib.strategy.sigopt.SigOptTuneStrategy", false]], "simple_inference() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.simple_inference", false]], "simple_inference() (in module neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.simple_inference", false]], "simple_progress_bar() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.simple_progress_bar", false]], "singleton() (in module neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.singleton", false]], "singleton() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.singleton", false]], "singleton() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.singleton", false]], "slim_session() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.slim_session", false]], "slim_session() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.slim_session", false]], "smooth_distribution() (in module neural_compressor.adaptor.ox_utils.calibrator)": [[3, "neural_compressor.adaptor.ox_utils.calibrator.smooth_distribution", false]], "smooth_quant_entry() (in module neural_compressor.tensorflow.quantization.algorithm_entry)": [[301, "neural_compressor.tensorflow.quantization.algorithm_entry.smooth_quant_entry", false]], "smooth_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.smooth_quant_entry", false]], "smoothquant (class in neural_compressor.algorithm.smooth_quant)": [[149, "neural_compressor.algorithm.smooth_quant.SmoothQuant", false]], "smoothquant (class in neural_compressor.tensorflow.algorithms.smoother.core)": [[284, "neural_compressor.tensorflow.algorithms.smoother.core.SmoothQuant", false]], "smoothquantcalibration (class in neural_compressor.adaptor.tf_utils.smooth_quant_calibration)": [[125, "neural_compressor.adaptor.tf_utils.smooth_quant_calibration.SmoothQuantCalibration", false]], "smoothquantcalibration (class in neural_compressor.tensorflow.algorithms.smoother.calibration)": [[283, "neural_compressor.tensorflow.algorithms.smoother.calibration.SmoothQuantCalibration", false]], "smoothquantcalibrationllm (class in neural_compressor.adaptor.tf_utils.smooth_quant_calibration)": [[125, "neural_compressor.adaptor.tf_utils.smooth_quant_calibration.SmoothQuantCalibrationLLM", false]], "smoothquantcalibrationllm (class in neural_compressor.tensorflow.algorithms.smoother.calibration)": [[283, "neural_compressor.tensorflow.algorithms.smoother.calibration.SmoothQuantCalibrationLLM", false]], "smoothquantconfig (class in neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.SmoothQuantConfig", false]], "smoothquantconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.SmoothQuantConfig", false]], "smoothquantquantizer (class in neural_compressor.torch.algorithms.smooth_quant.smooth_quant)": [[412, 
"neural_compressor.torch.algorithms.smooth_quant.smooth_quant.SmoothQuantQuantizer", false]], "smoothquantsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.SmoothQuantSampler", false]], "smoothquantscaler (class in neural_compressor.adaptor.tf_utils.smooth_quant_scaler)": [[126, "neural_compressor.adaptor.tf_utils.smooth_quant_scaler.SmoothQuantScaler", false]], "smoothquantscaler (class in neural_compressor.tensorflow.algorithms.smoother.scaler)": [[286, "neural_compressor.tensorflow.algorithms.smoother.scaler.SmoothQuantScaler", false]], "smoothquantscalerllm (class in neural_compressor.adaptor.tf_utils.smooth_quant_scaler)": [[126, "neural_compressor.adaptor.tf_utils.smooth_quant_scaler.SmoothQuantScalerLLM", false]], "smoothquantscalerllm (class in neural_compressor.tensorflow.algorithms.smoother.scaler)": [[286, "neural_compressor.tensorflow.algorithms.smoother.scaler.SmoothQuantScalerLLM", false]], "snipcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.SnipCriterion", false]], "snipmomentumcriterion (class in neural_compressor.compression.pruner.criteria)": [[169, "neural_compressor.compression.pruner.criteria.SnipMomentumCriterion", false]], "sparsedummydataset (class in neural_compressor.data.datasets.dummy_dataset_v2)": [[213, "neural_compressor.data.datasets.dummy_dataset_v2.SparseDummyDataset", false]], "sparsegptpruning (class in neural_compressor.compression.pruner.pruning)": [[188, "neural_compressor.compression.pruner.pruning.SparseGPTPruning", false]], "split_shared_bias() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.split_shared_bias", false]], "splitoperator (class in neural_compressor.adaptor.ox_utils.operators.split)": [[26, "neural_compressor.adaptor.ox_utils.operators.split.SplitOperator", false]], "splitsharedinputoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input)": [[67, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input.SplitSharedInputOptimizer", false]], "splitsharedinputoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input)": [[339, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input.SplitSharedInputOptimizer", false]], "sqlinearwrapper (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.SQLinearWrapper", false]], "squadexample (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.SquadExample", false]], "squadf1 (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.SquadF1", false]], "start_step (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.start_step", false]], "start_step (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.start_step", false]], "start_step (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.start_step", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearcher 
attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.classifierheadsearchertf attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.static_graph", false]], "static_graph (neural_compressor.compression.pruner.model_slim.pattern_analyzer.selfmhasearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher.static_graph", false]], "static_quant_entry() (in module neural_compressor.tensorflow.quantization.algorithm_entry)": [[301, "neural_compressor.tensorflow.quantization.algorithm_entry.static_quant_entry", false]], "static_quant_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.static_quant_entry", false]], "static_quant_export() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.static_quant_export", false]], "staticquantconfig (class in neural_compressor.tensorflow.keras.quantization.config)": [[299, "neural_compressor.tensorflow.keras.quantization.config.StaticQuantConfig", false]], "staticquantconfig (class in neural_compressor.tensorflow.quantization.config)": [[303, "neural_compressor.tensorflow.quantization.config.StaticQuantConfig", false]], "staticquantconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.StaticQuantConfig", false]], "staticquantquantizer (class in neural_compressor.torch.algorithms.static_quant.static_quant)": [[416, "neural_compressor.torch.algorithms.static_quant.static_quant.StaticQuantQuantizer", false]], "statistics (class in neural_compressor.common.utils.utility)": [[161, "neural_compressor.common.utils.utility.Statistics", false]], "statistics (class in neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.Statistics", false]], "str2array() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.str2array", false]], "strategy_registry() (in module neural_compressor.strategy.strategy)": [[274, "neural_compressor.strategy.strategy.strategy_registry", false]], "strip_equivalent_nodes() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.strip_equivalent_nodes", false]], "strip_equivalent_nodes() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.strip_equivalent_nodes", false]], "strip_unused_nodes() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.strip_unused_nodes", false]], "strip_unused_nodes() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.strip_unused_nodes", false]], "stripequivalentnodesoptimizer (class in 
neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes)": [[68, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes.StripEquivalentNodesOptimizer", false]], "stripequivalentnodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes)": [[340, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes.StripEquivalentNodesOptimizer", false]], "stripunusednodesoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes)": [[69, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes.StripUnusedNodesOptimizer", false]], "stripunusednodesoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes)": [[341, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes.StripUnusedNodesOptimizer", false]], "styletransferdataset (class in neural_compressor.data.datasets.style_transfer_dataset)": [[216, "neural_compressor.data.datasets.style_transfer_dataset.StyleTransferDataset", false]], "sum (neural_compressor.metric.metric.loss attribute)": [[234, "neural_compressor.metric.metric.Loss.sum", false]], "summary_benchmark() (in module neural_compressor.benchmark)": [[151, "neural_compressor.benchmark.summary_benchmark", false]], "summary_latency_throughput() (in module neural_compressor.common.benchmark)": [[154, "neural_compressor.common.benchmark.summary_latency_throughput", false]], "switchoptimizer (class in neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer)": [[70, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer.SwitchOptimizer", false]], "switchoptimizer (class in neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer)": [[342, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer.SwitchOptimizer", false]], "symbol_re (neural_compressor.metric.bleu.unicoderegex attribute)": [[227, "neural_compressor.metric.bleu.UnicodeRegex.symbol_re", false]], "symbolic_trace() (in module neural_compressor.adaptor.torch_utils.symbolic_trace)": [[144, "neural_compressor.adaptor.torch_utils.symbolic_trace.symbolic_trace", false]], "target_layers (neural_compressor.compression.pruner.model_slim.pattern_analyzer.jitbasicsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher.target_layers", false]], "target_layers (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.target_layers", false]], "target_op_lut (neural_compressor.compression.pruner.model_slim.pattern_analyzer.linear2linearsearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher.target_op_lut", false]], "target_sparsity (neural_compressor.compression.pruner.patterns.base.basepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.BasePattern.target_sparsity", false]], "target_sparsity (neural_compressor.compression.pruner.patterns.base.kerasbasepattern attribute)": [[175, "neural_compressor.compression.pruner.patterns.base.KerasBasePattern.target_sparsity", false]], "target_sparsity (neural_compressor.compression.pruner.patterns.base.pytorchbasepattern attribute)": [[175, 
"neural_compressor.compression.pruner.patterns.base.PytorchBasePattern.target_sparsity", false]], "target_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.basepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.BasePruner.target_sparsity_ratio", false]], "target_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.kerasbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.KerasBasePruner.target_sparsity_ratio", false]], "target_sparsity_ratio (neural_compressor.compression.pruner.pruners.base.pytorchbasepruner attribute)": [[180, "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner.target_sparsity_ratio", false]], "targets (neural_compressor.compression.pruner.model_slim.pattern_analyzer.recipesearcher attribute)": [[173, "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher.targets", false]], "targetspace (class in neural_compressor.strategy.bayesian)": [[266, "neural_compressor.strategy.bayesian.TargetSpace", false]], "tensorcollector (class in neural_compressor.adaptor.mxnet_utils.util)": [[1, "neural_compressor.adaptor.mxnet_utils.util.TensorCollector", false]], "tensorflow (class in neural_compressor.config)": [[195, "neural_compressor.config.TensorFlow", false]], "tensorflow (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.Tensorflow", false]], "tensorflow_itexadaptor (class in neural_compressor.adaptor.tensorflow)": [[32, "neural_compressor.adaptor.tensorflow.Tensorflow_ITEXAdaptor", false]], "tensorflow_itexadaptor (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.Tensorflow_ITEXAdaptor", false]], "tensorflowadam (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.TensorFlowAdam", false]], "tensorflowadamw (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.TensorFlowAdamW", false]], "tensorflowadaptor (class in neural_compressor.adaptor.tensorflow)": [[32, "neural_compressor.adaptor.tensorflow.TensorFlowAdaptor", false]], "tensorflowadaptor (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorFlowAdaptor", false]], "tensorflowbasemodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowBaseModel", false]], "tensorflowbasemodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowBaseModel", false]], "tensorflowbertdataloader (class in neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TensorflowBertDataLoader", false]], "tensorflowbertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.TensorflowBertDataset", false]], "tensorflowcheckpointmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowCheckpointModel", false]], "tensorflowcheckpointmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowCheckpointModel", false]], "tensorflowcifar10 
(class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowCIFAR10", false]], "tensorflowcifar100 (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowCIFAR100", false]], "tensorflowcocomap (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowCOCOMAP", false]], "tensorflowconfig (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorFlowConfig", false]], "tensorflowconfigconverter (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorflowConfigConverter", false]], "tensorflowcriterions (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowCriterions", false]], "tensorflowcroptoboundingbox (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowCropToBoundingBox", false]], "tensorflowcrossentropyloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorFlowCrossEntropyLoss", false]], "tensorflowdataloader (class in neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TensorflowDataLoader", false]], "tensorflowdatasets (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowDatasets", false]], "tensorflowfashionmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowFashionMNIST", false]], "tensorflowfilters (class in neural_compressor.data.filters.filter)": [[218, "neural_compressor.data.filters.filter.TensorflowFilters", false]], "tensorflowglobalconfig (class in neural_compressor.tensorflow.utils.model)": [[389, "neural_compressor.tensorflow.utils.model.TensorflowGlobalConfig", false]], "tensorflowimagenetdataset (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.TensorflowImagenetDataset", false]], "tensorflowimagenetraw (class in neural_compressor.data.datasets.imagenet_dataset)": [[214, "neural_compressor.data.datasets.imagenet_dataset.TensorflowImagenetRaw", false]], "tensorflowimagerecord (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowImageRecord", false]], "tensorflowknowledgedistillationloss (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowKnowledgeDistillationLoss", false]], "tensorflowknowledgedistillationlossexternal (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowKnowledgeDistillationLossExternal", false]], "tensorflowknowledgedistillationlosswrapper (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorflowKnowledgeDistillationLossWrapper", false]], "tensorflowllmmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowLLMModel", false]], "tensorflowllmmodel (class in 
neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowLLMModel", false]], "tensorflowmap (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowMAP", false]], "tensorflowmetrics (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowMetrics", false]], "tensorflowmnist (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowMNIST", false]], "tensorflowmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowModel", false]], "tensorflowmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.TensorflowModel", false]], "tensorflowmodelzoobertdataloader (class in neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TensorflowModelZooBertDataLoader", false]], "tensorflowmodelzoobertdataset (class in neural_compressor.data.datasets.bert_dataset)": [[209, "neural_compressor.data.datasets.bert_dataset.TensorflowModelZooBertDataset", false]], "tensorflowoptimizers (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.TensorflowOptimizers", false]], "tensorflowparserfactory (class in neural_compressor.profiling.parser.tensorflow_parser.factory)": [[252, "neural_compressor.profiling.parser.tensorflow_parser.factory.TensorFlowParserFactory", false]], "tensorflowprofilingparser (class in neural_compressor.profiling.parser.tensorflow_parser.parser)": [[253, "neural_compressor.profiling.parser.tensorflow_parser.parser.TensorFlowProfilingParser", false]], "tensorflowqatmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowQATModel", false]], "tensorflowqdqtoonnxqdqconverter (class in neural_compressor.adaptor.tf_utils.tf2onnx_converter)": [[127, "neural_compressor.adaptor.tf_utils.tf2onnx_converter.TensorflowQDQToOnnxQDQConverter", false]], "tensorflowquery (class in neural_compressor.adaptor.tensorflow)": [[32, "neural_compressor.adaptor.tensorflow.TensorflowQuery", false]], "tensorflowquery (class in neural_compressor.tensorflow.algorithms.static_quant.tensorflow)": [[289, "neural_compressor.tensorflow.algorithms.static_quant.tensorflow.TensorflowQuery", false]], "tensorflowrandomhorizontalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowRandomHorizontalFlip", false]], "tensorflowrandomverticalflip (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowRandomVerticalFlip", false]], "tensorflowresizecropimagenettransform (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.TensorflowResizeCropImagenetTransform", false]], "tensorflowresizewithratio (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowResizeWithRatio", false]], "tensorflowsavedmodelmodel (class in neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.TensorflowSavedModelModel", false]], "tensorflowsavedmodelmodel (class in neural_compressor.tensorflow.utils.model_wrappers)": [[390, 
"neural_compressor.tensorflow.utils.model_wrappers.TensorflowSavedModelModel", false]], "tensorflowsgd (class in neural_compressor.compression.distillation.optimizers)": [[165, "neural_compressor.compression.distillation.optimizers.TensorFlowSGD", false]], "tensorflowshiftrescale (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.TensorflowShiftRescale", false]], "tensorflowsparsecategoricalcrossentropy (class in neural_compressor.compression.distillation.criterions)": [[163, "neural_compressor.compression.distillation.criterions.TensorFlowSparseCategoricalCrossentropy", false]], "tensorflowtfrecorddataset (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowTFRecordDataset", false]], "tensorflowtopk (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowTopK", false]], "tensorflowtransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowTransform", false]], "tensorflowtransforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowTransforms", false]], "tensorflowtranspose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowTranspose", false]], "tensorflowtransposelastchannel (class in neural_compressor.data.transforms.imagenet_transform)": [[221, "neural_compressor.data.transforms.imagenet_transform.TensorflowTransposeLastChannel", false]], "tensorflowvocmap (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.TensorflowVOCMAP", false]], "tensorflowvocrecord (class in neural_compressor.data.datasets.dataset)": [[211, "neural_compressor.data.datasets.dataset.TensorflowVOCRecord", false]], "tensorflowwrapfunction (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TensorflowWrapFunction", false]], "teq_quantize_entry() (in module neural_compressor.torch.quantization.algorithm_entry)": [[437, "neural_compressor.torch.quantization.algorithm_entry.teq_quantize_entry", false]], "teqconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.TEQConfig", false]], "teqconfig (class in neural_compressor.transformers.utils.quantization_config)": [[452, "neural_compressor.transformers.utils.quantization_config.TeqConfig", false]], "teqlinearfakequant (class in neural_compressor.adaptor.torch_utils.model_wrapper)": [[142, "neural_compressor.adaptor.torch_utils.model_wrapper.TEQLinearFakeQuant", false]], "teqlinearfakequant (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.TEQLinearFakeQuant", false]], "tequantizer (class in neural_compressor.torch.algorithms.weight_only.teq)": [[432, "neural_compressor.torch.algorithms.weight_only.teq.TEQuantizer", false]], "tf2onnxconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.TF2ONNXConfig", false]], "tf_to_fp32_onnx() (in module neural_compressor.utils.export.tf2onnx)": [[458, "neural_compressor.utils.export.tf2onnx.tf_to_fp32_onnx", false]], "tf_to_int8_onnx() (in module neural_compressor.utils.export.tf2onnx)": [[458, "neural_compressor.utils.export.tf2onnx.tf_to_int8_onnx", false]], "tfdatadataloader (class in 
neural_compressor.data.dataloaders.tensorflow_dataloader)": [[208, "neural_compressor.data.dataloaders.tensorflow_dataloader.TFDataDataLoader", false]], "tfmodelzoocollecttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TFModelZooCollectTransform", false]], "tfslimnetsfactory (class in neural_compressor.model.nets_factory)": [[241, "neural_compressor.model.nets_factory.TFSlimNetsFactory", false]], "tfslimnetsfactory (class in neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.TFSlimNetsFactory", false]], "tfsquadv1modelzooposttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TFSquadV1ModelZooPostTransform", false]], "tfsquadv1posttransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TFSquadV1PostTransform", false]], "time_limit() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.time_limit", false]], "to_device() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.to_device", false]], "to_dtype() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.to_dtype", false]], "to_numpy() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.to_numpy", false]], "toarray (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ToArray", false]], "tondarraytransform (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.ToNDArrayTransform", false]], "torch2onnxconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.Torch2ONNXConfig", false]], "torch_to_fp32_onnx() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.torch_to_fp32_onnx", false]], "torch_to_int8_onnx() (in module neural_compressor.utils.export.torch2onnx)": [[459, "neural_compressor.utils.export.torch2onnx.torch_to_int8_onnx", false]], "torchbaseconfig (class in neural_compressor.torch.quantization.config)": [[439, "neural_compressor.torch.quantization.config.TorchBaseConfig", false]], "torchsmoothquant (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.TorchSmoothQuant", false]], "tpetunestrategy (class in neural_compressor.contrib.strategy.tpe)": [[199, "neural_compressor.contrib.strategy.tpe.TpeTuneStrategy", false]], "trace_and_fuse_sub_graph() (in module neural_compressor.adaptor.torch_utils.symbolic_trace)": [[144, "neural_compressor.adaptor.torch_utils.symbolic_trace.trace_and_fuse_sub_graph", false]], "trace_gptq_target_blocks() (in module neural_compressor.torch.algorithms.weight_only.gptq)": [[420, "neural_compressor.torch.algorithms.weight_only.gptq.trace_gptq_target_blocks", false]], "trainableequivalenttransformation (class in neural_compressor.torch.algorithms.weight_only.teq)": [[432, "neural_compressor.torch.algorithms.weight_only.teq.TrainableEquivalentTransformation", false]], "transform_registry() (in module neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.transform_registry", false]], "transformation() (in module neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter)": [[406, 
"neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.transformation", false]], "transformerbasedmodelblockpatterndetector (class in neural_compressor.adaptor.torch_utils.pattern_detector)": [[143, "neural_compressor.adaptor.torch_utils.pattern_detector.TransformerBasedModelBlockPatternDetector", false]], "transformerbasedmodelblockpatterndetector (class in neural_compressor.torch.algorithms.static_quant.utility)": [[417, "neural_compressor.torch.algorithms.static_quant.utility.TransformerBasedModelBlockPatternDetector", false]], "transforms (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.TRANSFORMS", false], [225, "neural_compressor.data.transforms.transform.Transforms", false]], "transpose (class in neural_compressor.data.transforms.transform)": [[225, "neural_compressor.data.transforms.transform.Transpose", false]], "trt_env_setup() (in module neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.trt_env_setup", false]], "try_loading_keras() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.try_loading_keras", false]], "try_loading_keras() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.try_loading_keras", false]], "tunestrategy (class in neural_compressor.strategy.strategy)": [[274, "neural_compressor.strategy.strategy.TuneStrategy", false]], "tunestrategymeta (class in neural_compressor.strategy.strategy)": [[274, "neural_compressor.strategy.strategy.TuneStrategyMeta", false]], "tuningconfig (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.TuningConfig", false]], "tuningcriterion (class in neural_compressor.config)": [[195, "neural_compressor.config.TuningCriterion", false]], "tuningitem (class in neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.TuningItem", false]], "tuninglogger (class in neural_compressor.common.utils.logger)": [[159, "neural_compressor.common.utils.logger.TuningLogger", false]], "tuningmonitor (class in neural_compressor.common.base_tuning)": [[153, "neural_compressor.common.base_tuning.TuningMonitor", false]], "tuningorder (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.TuningOrder", false]], "tuningparam (class in neural_compressor.common.tuning_param)": [[156, "neural_compressor.common.tuning_param.TuningParam", false]], "tuningsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.TuningSampler", false]], "tuningspace (class in neural_compressor.strategy.utils.tuning_space)": [[278, "neural_compressor.strategy.utils.tuning_space.TuningSpace", false]], "unarydirect8bitoperator (class in neural_compressor.adaptor.ox_utils.operators.unary_op)": [[27, "neural_compressor.adaptor.ox_utils.operators.unary_op.UnaryDirect8BitOperator", false]], "unaryoperator (class in neural_compressor.adaptor.ox_utils.operators.unary_op)": [[27, "neural_compressor.adaptor.ox_utils.operators.unary_op.UnaryOperator", false]], "unicoderegex (class in neural_compressor.metric.bleu)": [[227, "neural_compressor.metric.bleu.UnicodeRegex", false]], "unpackedweightonlylinearparams (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, 
"neural_compressor.torch.algorithms.weight_only.modules.UnpackedWeightOnlyLinearParams", false]], "unpicklingerror": [[138, "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle.UnpicklingError", false], [397, "neural_compressor.torch.algorithms.layer_wise.modified_pickle.UnpicklingError", false]], "update_module() (in module neural_compressor.adaptor.torch_utils.layer_wise_quant.utils)": [[141, "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils.update_module", false]], "update_module() (in module neural_compressor.torch.algorithms.layer_wise.utils)": [[398, "neural_compressor.torch.algorithms.layer_wise.utils.update_module", false]], "update_params() (in module neural_compressor.compression.pruner.utils)": [[192, "neural_compressor.compression.pruner.utils.update_params", false]], "update_sq_scale() (in module neural_compressor.adaptor.torch_utils.util)": [[145, "neural_compressor.adaptor.torch_utils.util.update_sq_scale", false]], "update_sq_scale() (in module neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.update_sq_scale", false]], "valid_keras_format() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.valid_keras_format", false]], "valid_reshape_inputs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.valid_reshape_inputs", false]], "valid_reshape_inputs() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.valid_reshape_inputs", false]], "valid_reshape_inputs() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.valid_reshape_inputs", false]], "valid_reshape_inputs() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.valid_reshape_inputs", false]], "validate_and_inference_input_output() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.validate_and_inference_input_output", false]], "validate_and_inference_input_output() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.validate_and_inference_input_output", false]], "validate_graph_node() (in module neural_compressor.model.tensorflow_model)": [[243, "neural_compressor.model.tensorflow_model.validate_graph_node", false]], "validate_graph_node() (in module neural_compressor.tensorflow.utils.model_wrappers)": [[390, "neural_compressor.tensorflow.utils.model_wrappers.validate_graph_node", false]], "validate_modules() (in module neural_compressor.torch.utils.utility)": [[448, "neural_compressor.torch.utils.utility.validate_modules", false]], "valueinfo (class in neural_compressor.adaptor.ox_utils.util)": [[30, "neural_compressor.adaptor.ox_utils.util.ValueInfo", false]], "values_from_const() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn)": [[52, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn.values_from_const", false]], "values_from_const() (in 
module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in)": [[53, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in.values_from_const", false]], "values_from_const() (in module neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm)": [[55, "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm.values_from_const", false]], "values_from_const() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn)": [[324, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn.values_from_const", false]], "values_from_const() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in)": [[325, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in.values_from_const", false]], "values_from_const() (in module neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm)": [[327, "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm.values_from_const", false]], "version1_eq_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_eq_version2", false]], "version1_eq_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_eq_version2", false]], "version1_eq_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_eq_version2", false]], "version1_gt_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_gt_version2", false]], "version1_gt_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_gt_version2", false]], "version1_gt_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_gt_version2", false]], "version1_gte_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_gte_version2", false]], "version1_gte_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_gte_version2", false]], "version1_gte_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_gte_version2", false]], "version1_lt_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_lt_version2", false]], "version1_lt_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_lt_version2", false]], "version1_lt_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_lt_version2", false]], "version1_lte_version2() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.version1_lte_version2", false]], "version1_lte_version2() (in module neural_compressor.tensorflow.utils.utility)": [[391, "neural_compressor.tensorflow.utils.utility.version1_lte_version2", false]], "version1_lte_version2() (in module neural_compressor.utils.utility)": [[466, "neural_compressor.utils.utility.version1_lte_version2", false]], "w8a8pt2equantizer (class in 
neural_compressor.torch.algorithms.pt2e_quant.core)": [[405, "neural_compressor.torch.algorithms.pt2e_quant.core.W8A8PT2EQuantizer", false]], "warn() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.warn", false]], "warning() (in module neural_compressor.utils.logger)": [[463, "neural_compressor.utils.logger.warning", false]], "weightcorrection (class in neural_compressor.algorithm.weight_correction)": [[150, "neural_compressor.algorithm.weight_correction.WeightCorrection", false]], "weightonlylinear (class in neural_compressor.torch.algorithms.weight_only.modules)": [[429, "neural_compressor.torch.algorithms.weight_only.modules.WeightOnlyLinear", false]], "weightonlyquantsampler (class in neural_compressor.strategy.utils.tuning_sampler)": [[277, "neural_compressor.strategy.utils.tuning_sampler.WeightOnlyQuantSampler", false]], "weightpruningconfig (class in neural_compressor.config)": [[195, "neural_compressor.config.WeightPruningConfig", false]], "weightsdetails (class in neural_compressor.utils.weights_details)": [[467, "neural_compressor.utils.weights_details.WeightsDetails", false]], "weightsstatistics (class in neural_compressor.utils.weights_details)": [[467, "neural_compressor.utils.weights_details.WeightsStatistics", false]], "whitespace_tokenize() (in module neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.whitespace_tokenize", false]], "woqmodelloader (class in neural_compressor.torch.algorithms.weight_only.save_load)": [[431, "neural_compressor.torch.algorithms.weight_only.save_load.WOQModelLoader", false]], "wordpiecetokenizer (class in neural_compressor.data.transforms.tokenization)": [[224, "neural_compressor.data.transforms.tokenization.WordpieceTokenizer", false]], "wrapmxnetmetric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.WrapMXNetMetric", false]], "wraponnxrtmetric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.WrapONNXRTMetric", false]], "wrapperlayer (class in neural_compressor.torch.algorithms.smooth_quant.utility)": [[413, "neural_compressor.torch.algorithms.smooth_quant.utility.WrapperLayer", false]], "wrappytorchmetric (class in neural_compressor.metric.metric)": [[234, "neural_compressor.metric.metric.WrapPyTorchMetric", false]], "write_graph() (in module neural_compressor.adaptor.tf_utils.util)": [[133, "neural_compressor.adaptor.tf_utils.util.write_graph", false]], "write_graph() (in module neural_compressor.tensorflow.quantization.utils.utility)": [[385, "neural_compressor.tensorflow.quantization.utils.utility.write_graph", false]], "xpu_accelerator (class in neural_compressor.torch.utils.auto_accelerator)": [[443, "neural_compressor.torch.utils.auto_accelerator.XPU_Accelerator", false]]}, "objects": {"": [[226, 0, 0, "-", "neural_compressor"]], "neural_compressor": [[148, 0, 0, "-", "algorithm"], [151, 0, 0, "-", "benchmark"], [155, 0, 0, "-", "common"], [195, 0, 0, "-", "config"], [196, 0, 0, "-", "contrib"], [220, 0, 0, "-", "data"], [233, 0, 0, "-", "metric"], [235, 0, 0, "-", "mix_precision"], [237, 0, 0, "-", "model"], [245, 0, 0, "-", "objective"], [246, 0, 0, "-", "profiling"], [262, 0, 0, "-", "quantization"], [270, 0, 0, "-", "strategy"], [290, 0, 0, "-", "tensorflow"], [436, 0, 0, "-", "torch"], [449, 0, 0, "-", "training"], [460, 0, 0, "-", "utils"], [468, 0, 0, "-", "version"]], "neural_compressor.adaptor": [[0, 0, 0, "-", "mxnet_utils"], [4, 0, 0, "-", "ox_utils"], [32, 0, 0, 
"-", "tensorflow"], [96, 0, 0, "-", "tf_utils"], [136, 0, 0, "-", "torch_utils"]], "neural_compressor.adaptor.mxnet_utils": [[1, 0, 0, "-", "util"]], "neural_compressor.adaptor.mxnet_utils.util": [[1, 1, 1, "", "CalibCollector"], [1, 1, 1, "", "CalibData"], [1, 1, 1, "", "CollectorBase"], [1, 1, 1, "", "DataIterLoader"], [1, 1, 1, "", "DataLoaderWrap"], [1, 1, 1, "", "NameCollector"], [1, 1, 1, "", "OpType"], [1, 1, 1, "", "TensorCollector"], [1, 2, 1, "", "amp_convert"], [1, 2, 1, "", "calib_model"], [1, 2, 1, "", "check_mx_version"], [1, 2, 1, "", "combine_capabilities"], [1, 2, 1, "", "create_data_example"], [1, 2, 1, "", "distribute_calib_tensors"], [1, 2, 1, "", "ensure_list"], [1, 2, 1, "", "fuse"], [1, 2, 1, "", "get_framework_name"], [1, 2, 1, "", "is_model_quantized"], [1, 2, 1, "", "isiterable"], [1, 2, 1, "", "make_module"], [1, 2, 1, "", "make_nc_model"], [1, 2, 1, "", "make_symbol_block"], [1, 2, 1, "", "ndarray_to_device"], [1, 2, 1, "", "parse_tune_config"], [1, 2, 1, "", "prepare_dataloader"], [1, 2, 1, "", "prepare_model"], [1, 2, 1, "", "prepare_model_data"], [1, 2, 1, "", "quantize_sym_model"], [1, 2, 1, "", "query_quantizable_nodes"], [1, 2, 1, "", "run_forward"]], "neural_compressor.adaptor.ox_utils": [[2, 0, 0, "-", "calibration"], [3, 0, 0, "-", "calibrator"], [16, 0, 0, "-", "operators"], [28, 0, 0, "-", "quantizer"], [29, 0, 0, "-", "smooth_quant"], [30, 0, 0, "-", "util"], [31, 0, 0, "-", "weight_only"]], "neural_compressor.adaptor.ox_utils.calibration": [[2, 1, 1, "", "ONNXRTAugment"]], "neural_compressor.adaptor.ox_utils.calibrator": [[3, 1, 1, "", "CalibratorBase"], [3, 1, 1, "", "HistogramCollector"], [3, 1, 1, "", "KLCalibrator"], [3, 1, 1, "", "MinMaxCalibrator"], [3, 1, 1, "", "PercentileCalibrator"], [3, 2, 1, "", "calib_registry"], [3, 2, 1, "", "smooth_distribution"]], "neural_compressor.adaptor.ox_utils.operators": [[5, 0, 0, "-", "activation"], [6, 0, 0, "-", "argmax"], [7, 0, 0, "-", "attention"], [8, 0, 0, "-", "binary_op"], [9, 0, 0, "-", "concat"], [10, 0, 0, "-", "conv"], [11, 0, 0, "-", "direct_q8"], [12, 0, 0, "-", "embed_layernorm"], [13, 0, 0, "-", "gather"], [14, 0, 0, "-", "gavgpool"], [15, 0, 0, "-", "gemm"], [17, 0, 0, "-", "lstm"], [18, 0, 0, "-", "matmul"], [19, 0, 0, "-", "maxpool"], [20, 0, 0, "-", "norm"], [21, 0, 0, "-", "ops"], [22, 0, 0, "-", "pad"], [23, 0, 0, "-", "pooling"], [24, 0, 0, "-", "reduce"], [25, 0, 0, "-", "resize"], [26, 0, 0, "-", "split"], [27, 0, 0, "-", "unary_op"]], "neural_compressor.adaptor.ox_utils.operators.activation": [[5, 1, 1, "", "ActivationOperator"], [5, 1, 1, "", "Float16ActivationOperator"], [5, 1, 1, "", "QActivationOperator"], [5, 1, 1, "", "RemovableActivationOperator"]], "neural_compressor.adaptor.ox_utils.operators.argmax": [[6, 1, 1, "", "ArgMaxOperator"], [6, 1, 1, "", "QArgMaxOperator"]], "neural_compressor.adaptor.ox_utils.operators.attention": [[7, 1, 1, "", "AttentionOperator"], [7, 1, 1, "", "QAttentionOperator"]], "neural_compressor.adaptor.ox_utils.operators.binary_op": [[8, 1, 1, "", "BinaryDirect8BitOperator"], [8, 1, 1, "", "BinaryOperator"], [8, 1, 1, "", "Float16BinaryOperator"], [8, 1, 1, "", "QBinaryOperator"]], "neural_compressor.adaptor.ox_utils.operators.concat": [[9, 1, 1, "", "ConcatOperator"], [9, 1, 1, "", "QConcatOperator"]], "neural_compressor.adaptor.ox_utils.operators.conv": [[10, 1, 1, "", "ConvOperator"], [10, 1, 1, "", "QConvOperator"]], "neural_compressor.adaptor.ox_utils.operators.direct_q8": [[11, 1, 1, "", "Direct8BitOperator"], [11, 1, 1, "", 
"QDirectOperator"]], "neural_compressor.adaptor.ox_utils.operators.embed_layernorm": [[12, 1, 1, "", "EmbedLayerNormalizationOperator"], [12, 1, 1, "", "QEmbedLayerNormalizationOperator"]], "neural_compressor.adaptor.ox_utils.operators.gather": [[13, 1, 1, "", "GatherOperator"], [13, 1, 1, "", "QGatherOperator"]], "neural_compressor.adaptor.ox_utils.operators.gavgpool": [[14, 1, 1, "", "GlobalAveragePoolOperator"], [14, 1, 1, "", "QGlobalAveragePoolOperator"]], "neural_compressor.adaptor.ox_utils.operators.gemm": [[15, 1, 1, "", "GemmOperator"], [15, 1, 1, "", "QGemmOperator"]], "neural_compressor.adaptor.ox_utils.operators.lstm": [[17, 1, 1, "", "LSTMOperator"]], "neural_compressor.adaptor.ox_utils.operators.matmul": [[18, 1, 1, "", "FusedMatMulOperator"], [18, 1, 1, "", "MatMulOperator"], [18, 1, 1, "", "QMatMulOperator"]], "neural_compressor.adaptor.ox_utils.operators.maxpool": [[19, 1, 1, "", "MaxPoolOperator"], [19, 1, 1, "", "QMaxPoolOperator"]], "neural_compressor.adaptor.ox_utils.operators.norm": [[20, 1, 1, "", "BatchNormalizationOperator"], [20, 1, 1, "", "NormalizationOperator"]], "neural_compressor.adaptor.ox_utils.operators.ops": [[21, 1, 1, "", "Operator"], [21, 1, 1, "", "QOperator"], [21, 2, 1, "", "op_registry"], [21, 2, 1, "", "qop_registry"]], "neural_compressor.adaptor.ox_utils.operators.pad": [[22, 1, 1, "", "PadOperator"], [22, 1, 1, "", "QPadOperator"]], "neural_compressor.adaptor.ox_utils.operators.pooling": [[23, 1, 1, "", "PoolOperator"], [23, 1, 1, "", "QPoolOperator"]], "neural_compressor.adaptor.ox_utils.operators.reduce": [[24, 1, 1, "", "ReduceMinMaxOperator"], [24, 1, 1, "", "ReduceOperator"]], "neural_compressor.adaptor.ox_utils.operators.resize": [[25, 1, 1, "", "QResizeOperator"], [25, 1, 1, "", "ResizeOperator"]], "neural_compressor.adaptor.ox_utils.operators.split": [[26, 1, 1, "", "QSplitOperator"], [26, 1, 1, "", "SplitOperator"]], "neural_compressor.adaptor.ox_utils.operators.unary_op": [[27, 1, 1, "", "UnaryDirect8BitOperator"], [27, 1, 1, "", "UnaryOperator"]], "neural_compressor.adaptor.ox_utils.quantizer": [[28, 1, 1, "", "Quantizer"]], "neural_compressor.adaptor.ox_utils.smooth_quant": [[29, 1, 1, "", "ORTSmoothQuant"], [29, 2, 1, "", "get_quant_dequant_output"], [29, 2, 1, "", "make_sub_graph"], [29, 2, 1, "", "quant_dequant_data"]], "neural_compressor.adaptor.ox_utils.util": [[30, 1, 1, "", "QuantFormat"], [30, 1, 1, "", "QuantType"], [30, 1, 1, "", "QuantizationMode"], [30, 1, 1, "", "QuantizedInitializer"], [30, 1, 1, "", "QuantizedValue"], [30, 1, 1, "", "QuantizedValueType"], [30, 1, 1, "", "ValueInfo"], [30, 2, 1, "", "attribute_to_kwarg"], [30, 2, 1, "", "calculate_scale_zp"], [30, 2, 1, "", "cast_tensor"], [30, 2, 1, "", "collate_preds"], [30, 2, 1, "", "dequantize_data"], [30, 2, 1, "", "dequantize_data_with_scale_zero"], [30, 2, 1, "", "dtype_to_name"], [30, 2, 1, "", "find_by_name"], [30, 2, 1, "", "float_to_bfloat16"], [30, 2, 1, "", "float_to_float16"], [30, 2, 1, "", "get_node_original_name"], [30, 2, 1, "", "infer_shapes"], [30, 2, 1, "", "is_B_transposed"], [30, 2, 1, "", "make_dquant_node"], [30, 2, 1, "", "make_quant_node"], [30, 2, 1, "", "quantize_data"], [30, 2, 1, "", "quantize_data_per_channel"], [30, 2, 1, "", "quantize_data_with_scale_zero"], [30, 2, 1, "", "quantize_nparray"], [30, 2, 1, "", "remove_init_from_model_input"], [30, 2, 1, "", "simple_progress_bar"], [30, 2, 1, "", "split_shared_bias"], [30, 2, 1, "", "to_numpy"], [30, 2, 1, "", "trt_env_setup"]], "neural_compressor.adaptor.ox_utils.weight_only": [[31, 2, 
1, "", "apply_awq_clip"], [31, 2, 1, "", "apply_awq_scale"], [31, 2, 1, "", "awq_quantize"], [31, 2, 1, "", "get_blob_size"], [31, 2, 1, "", "get_weight_scale"], [31, 2, 1, "", "gptq"], [31, 2, 1, "", "gptq_quantize"], [31, 2, 1, "", "make_matmul_weight_only_node"], [31, 2, 1, "", "pad_tensor"], [31, 2, 1, "", "prepare_inputs"], [31, 2, 1, "", "qdq_tensor"], [31, 2, 1, "", "quant_tensor"], [31, 2, 1, "", "rtn_quantize"]], "neural_compressor.adaptor.tensorflow": [[32, 1, 1, "", "TensorFlowAdaptor"], [32, 1, 1, "", "TensorflowQuery"], [32, 1, 1, "", "Tensorflow_ITEXAdaptor"]], "neural_compressor.adaptor.tf_utils": [[33, 0, 0, "-", "graph_converter"], [34, 0, 0, "-", "graph_converter_without_calib"], [72, 0, 0, "-", "graph_rewriter"], [95, 0, 0, "-", "graph_util"], [97, 0, 0, "-", "quantize_graph"], [124, 0, 0, "-", "quantize_graph_common"], [125, 0, 0, "-", "smooth_quant_calibration"], [126, 0, 0, "-", "smooth_quant_scaler"], [127, 0, 0, "-", "tf2onnx_converter"], [130, 0, 0, "-", "transform_graph"], [133, 0, 0, "-", "util"]], "neural_compressor.adaptor.tf_utils.graph_converter": [[33, 1, 1, "", "GraphConverter"]], "neural_compressor.adaptor.tf_utils.graph_converter_without_calib": [[34, 1, 1, "", "GraphConverterWithoutCalib"]], "neural_compressor.adaptor.tf_utils.graph_rewriter": [[37, 0, 0, "-", "bf16"], [61, 0, 0, "-", "generic"], [71, 0, 0, "-", "graph_base"], [80, 0, 0, "-", "int8"], [86, 0, 0, "-", "onnx"], [91, 0, 0, "-", "qdq"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16": [[35, 0, 0, "-", "bf16_convert"], [36, 0, 0, "-", "dequantize_cast_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert": [[35, 1, 1, "", "BF16Convert"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[36, 1, 1, "", "DequantizeCastOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic": [[38, 0, 0, "-", "convert_add_to_biasadd"], [39, 0, 0, "-", "convert_layout"], [40, 0, 0, "-", "convert_leakyrelu"], [41, 0, 0, "-", "convert_nan_to_random"], [42, 0, 0, "-", "convert_placeholder_to_const"], [43, 0, 0, "-", "dilated_contraction"], [44, 0, 0, "-", "dummy_biasadd"], [45, 0, 0, "-", "expanddims_optimizer"], [46, 0, 0, "-", "fetch_weight_from_reshape"], [47, 0, 0, "-", "fold_batch_norm"], [48, 0, 0, "-", "fold_constant"], [49, 0, 0, "-", "fuse_biasadd_add"], [50, 0, 0, "-", "fuse_column_wise_mul"], [51, 0, 0, "-", "fuse_conv_with_math"], [52, 0, 0, "-", "fuse_decomposed_bn"], [53, 0, 0, "-", "fuse_decomposed_in"], [54, 0, 0, "-", "fuse_gelu"], [55, 0, 0, "-", "fuse_layer_norm"], [56, 0, 0, "-", "fuse_pad_with_conv"], [57, 0, 0, "-", "fuse_pad_with_fp32_conv"], [58, 0, 0, "-", "fuse_reshape_transpose"], [59, 0, 0, "-", "graph_cse_optimizer"], [60, 0, 0, "-", "grappler_pass"], [62, 0, 0, "-", "insert_print_node"], [63, 0, 0, "-", "move_squeeze_after_relu"], [64, 0, 0, "-", "pre_optimize"], [65, 0, 0, "-", "remove_training_nodes"], [66, 0, 0, "-", "rename_batch_norm"], [67, 0, 0, "-", "split_shared_input"], [68, 0, 0, "-", "strip_equivalent_nodes"], [69, 0, 0, "-", "strip_unused_nodes"], [70, 0, 0, "-", "switch_optimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd": [[38, 1, 1, "", "ConvertAddToBiasAddOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout": [[39, 1, 1, "", "ConvertLayoutOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu": [[40, 1, 1, "", "ConvertLeakyReluOptimizer"]], 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random": [[41, 1, 1, "", "ConvertNanToRandom"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const": [[42, 1, 1, "", "ConvertPlaceholderToConst"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction": [[43, 1, 1, "", "DilatedContraction"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd": [[44, 1, 1, "", "InjectDummyBiasAddOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer": [[45, 1, 1, "", "ExpandDimsOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape": [[46, 1, 1, "", "FetchWeightFromReshapeOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm": [[47, 1, 1, "", "FoldBatchNormNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant": [[48, 1, 1, "", "GraphFoldConstantOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add": [[49, 1, 1, "", "FuseBiasAddAndAddOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul": [[50, 1, 1, "", "FuseColumnWiseMulOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math": [[51, 1, 1, "", "FuseConvWithMathOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn": [[52, 1, 1, "", "FuseDecomposedBNOptimizer"], [52, 2, 1, "", "bypass_reshape"], [52, 2, 1, "", "get_const_dim_count"], [52, 2, 1, "", "node_from_map"], [52, 2, 1, "", "node_name_from_input"], [52, 2, 1, "", "valid_reshape_inputs"], [52, 2, 1, "", "values_from_const"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in": [[53, 1, 1, "", "FuseDecomposedINOptimizer"], [53, 2, 1, "", "bypass_reshape"], [53, 2, 1, "", "get_const_dim_count"], [53, 2, 1, "", "node_from_map"], [53, 2, 1, "", "node_name_from_input"], [53, 2, 1, "", "valid_reshape_inputs"], [53, 2, 1, "", "values_from_const"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu": [[54, 1, 1, "", "FuseGeluOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm": [[55, 1, 1, "", "FuseLayerNormOptimizer"], [55, 2, 1, "", "node_from_map"], [55, 2, 1, "", "node_name_from_input"], [55, 2, 1, "", "values_from_const"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv": [[56, 1, 1, "", "FusePadWithConv2DOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[57, 1, 1, "", "FusePadWithFP32Conv2DOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose": [[58, 1, 1, "", "FuseTransposeReshapeOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer": [[59, 1, 1, "", "GraphCseOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass": [[60, 1, 1, "", "GrapplerOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node": [[62, 1, 1, "", "InsertPrintMinMaxNode"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu": [[63, 1, 1, "", "MoveSqueezeAfterReluOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize": [[64, 1, 1, "", "PreOptimization"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes": [[65, 1, 1, "", 
"RemoveTrainingNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm": [[66, 1, 1, "", "RenameBatchNormOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input": [[67, 1, 1, "", "SplitSharedInputOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes": [[68, 1, 1, "", "StripEquivalentNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes": [[69, 1, 1, "", "StripUnusedNodesOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer": [[70, 1, 1, "", "SwitchOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base": [[71, 1, 1, "", "GraphRewriterBase"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8": [[73, 0, 0, "-", "freeze_fake_quant"], [74, 0, 0, "-", "freeze_value"], [75, 0, 0, "-", "freeze_value_without_calib"], [76, 0, 0, "-", "fuse_conv_redundant_dequantize"], [77, 0, 0, "-", "fuse_conv_requantize"], [78, 0, 0, "-", "fuse_matmul_redundant_dequantize"], [79, 0, 0, "-", "fuse_matmul_requantize"], [81, 0, 0, "-", "meta_op_optimizer"], [82, 0, 0, "-", "post_hostconst_converter"], [83, 0, 0, "-", "post_quantized_op_cse"], [84, 0, 0, "-", "rnn_convert"], [85, 0, 0, "-", "scale_propagation"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant": [[73, 1, 1, "", "FreezeFakeQuantOpOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value": [[74, 1, 1, "", "FreezeValueTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib": [[75, 1, 1, "", "FreezeValueWithoutCalibTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[76, 1, 1, "", "FuseConvRedundantDequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize": [[77, 1, 1, "", "FuseConvRequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[78, 1, 1, "", "FuseMatMulRedundantDequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize": [[79, 1, 1, "", "FuseMatMulRequantizeDequantizeNewAPITransformer"], [79, 1, 1, "", "FuseMatMulRequantizeDequantizeTransformer"], [79, 1, 1, "", "FuseMatMulRequantizeNewAPITransformer"], [79, 1, 1, "", "FuseMatMulRequantizeTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer": [[81, 1, 1, "", "MetaInfoChangingMemOpOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter": [[82, 1, 1, "", "PostHostConstConverter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse": [[83, 1, 1, "", "PostCseOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert": [[84, 1, 1, "", "QuantizedRNNConverter"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation": [[85, 1, 1, "", "ScaleProPagationTransformer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx": [[87, 0, 0, "-", "onnx_graph"], [88, 0, 0, "-", "onnx_node"], [89, 0, 0, "-", "onnx_schema"], [90, 0, 0, "-", "tf2onnx_utils"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph": [[87, 1, 1, "", "OnnxGraph"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node": [[88, 1, 1, "", "OnnxNode"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema": [[89, 1, 1, "", 
"OnnxOpSchema"], [89, 2, 1, "", "get_max_supported_opset_version"], [89, 2, 1, "", "get_schema"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils": [[90, 1, 1, "", "SeqType"], [90, 2, 1, "", "add_port_to_name"], [90, 2, 1, "", "are_shapes_equal"], [90, 2, 1, "", "assert_error"], [90, 2, 1, "", "compute_const_folding_using_tf"], [90, 2, 1, "", "convert_tensorflow_tensor_to_onnx"], [90, 2, 1, "", "find_opset"], [90, 2, 1, "", "get_index_from_strided_slice_of_shape"], [90, 2, 1, "", "get_subgraphs_from_onnx"], [90, 2, 1, "", "get_tensorflow_node_attr"], [90, 2, 1, "", "get_tensorflow_node_shape_attr"], [90, 2, 1, "", "get_tensorflow_tensor_data"], [90, 2, 1, "", "get_tensorflow_tensor_shape"], [90, 2, 1, "", "infer_onnx_shape_dtype"], [90, 2, 1, "", "initialize_name_counter"], [90, 2, 1, "", "is_list_or_tuple"], [90, 2, 1, "", "is_onnx_domain"], [90, 2, 1, "", "make_onnx_inputs_outputs"], [90, 2, 1, "", "make_onnx_shape"], [90, 2, 1, "", "map_numpy_to_onnx_dtype"], [90, 2, 1, "", "map_onnx_to_numpy_type"], [90, 2, 1, "", "map_tensorflow_dtype"], [90, 2, 1, "", "read_tensorflow_node_attrs"], [90, 2, 1, "", "save_protobuf"], [90, 2, 1, "", "set_name"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq": [[92, 0, 0, "-", "insert_qdq_pattern"], [93, 0, 0, "-", "merge_duplicated_qdq"], [94, 0, 0, "-", "share_qdq_y_pattern"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern": [[92, 1, 1, "", "GenerateGraphWithQDQPattern"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq": [[93, 1, 1, "", "MergeDuplicatedQDQOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern": [[94, 1, 1, "", "ShareQDQForItexYPatternOptimizer"]], "neural_compressor.adaptor.tf_utils.graph_util": [[95, 1, 1, "", "GraphAnalyzer"], [95, 1, 1, "", "GraphRewriterHelper"]], "neural_compressor.adaptor.tf_utils.quantize_graph": [[99, 0, 0, "-", "qat"], [115, 0, 0, "-", "qdq"], [117, 0, 0, "-", "quantize_graph_base"], [118, 0, 0, "-", "quantize_graph_bn"], [119, 0, 0, "-", "quantize_graph_concatv2"], [120, 0, 0, "-", "quantize_graph_conv"], [121, 0, 0, "-", "quantize_graph_for_intel_cpu"], [122, 0, 0, "-", "quantize_graph_matmul"], [123, 0, 0, "-", "quantize_graph_pooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat": [[98, 0, 0, "-", "fake_quantize"], [100, 0, 0, "-", "quantize_config"], [101, 0, 0, "-", "quantize_helper"], [102, 0, 0, "-", "quantize_layers"], [107, 0, 0, "-", "quantize_wrapper"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize": [[98, 1, 1, "", "FakeQuantize"], [98, 1, 1, "", "FakeQuantizeBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config": [[100, 1, 1, "", "QuantizeConfig"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper": [[101, 2, 1, "", "init_quantize_config"], [101, 2, 1, "", "qat_clone_function"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers": [[103, 0, 0, "-", "optimize_layer"], [104, 0, 0, "-", "quantize_layer_add"], [105, 0, 0, "-", "quantize_layer_base"], [106, 0, 0, "-", "quantize_layer_bn"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer": [[103, 2, 1, "", "config_quantizable_layers"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add": [[104, 1, 1, "", "QuantizeLayerAdd"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base": [[105, 1, 1, "", 
"QuantizeLayerBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn": [[106, 1, 1, "", "QuantizeLayerBatchNormalization"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper": [[107, 1, 1, "", "QuantizeWrapper"], [107, 1, 1, "", "QuantizeWrapperBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq": [[108, 0, 0, "-", "fuse_qdq_bn"], [109, 0, 0, "-", "fuse_qdq_concatv2"], [110, 0, 0, "-", "fuse_qdq_conv"], [111, 0, 0, "-", "fuse_qdq_deconv"], [112, 0, 0, "-", "fuse_qdq_in"], [113, 0, 0, "-", "fuse_qdq_matmul"], [114, 0, 0, "-", "fuse_qdq_pooling"], [116, 0, 0, "-", "optimize_qdq"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn": [[108, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2": [[109, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv": [[110, 1, 1, "", "FuseNodeStartWithConv2d"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv": [[111, 1, 1, "", "FuseNodeStartWithDeconv2d"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in": [[112, 1, 1, "", "FuseNodeStartWithFusedInstanceNorm"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul": [[113, 1, 1, "", "FuseNodeStartWithMatmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling": [[114, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq": [[116, 1, 1, "", "OptimizeQDQGraph"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base": [[117, 1, 1, "", "QuantizeGraphBase"], [117, 1, 1, "", "QuantizeNodeBase"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn": [[118, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2": [[119, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv": [[120, 1, 1, "", "FuseNodeStartWithConv2d"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu": [[121, 1, 1, "", "QuantizeGraphForIntel"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul": [[122, 1, 1, "", "FuseNodeStartWithMatmul"]], "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling": [[123, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.adaptor.tf_utils.quantize_graph_common": [[124, 1, 1, "", "QuantizeGraphHelper"]], "neural_compressor.adaptor.tf_utils.smooth_quant_calibration": [[125, 1, 1, "", "SmoothQuantCalibration"], [125, 1, 1, "", "SmoothQuantCalibrationLLM"]], "neural_compressor.adaptor.tf_utils.smooth_quant_scaler": [[126, 1, 1, "", "SmoothQuantScaler"], [126, 1, 1, "", "SmoothQuantScalerLLM"]], "neural_compressor.adaptor.tf_utils.tf2onnx_converter": [[127, 1, 1, "", "TensorflowQDQToOnnxQDQConverter"]], "neural_compressor.adaptor.tf_utils.transform_graph": [[128, 0, 0, "-", "bias_correction"], [129, 0, 0, "-", "graph_transform_base"], [131, 0, 0, "-", "insert_logging"], [132, 0, 0, "-", "rerange_quantized_concat"]], "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction": [[128, 1, 1, "", "BiasCorrection"]], "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base": [[129, 1, 1, "", "GraphTransformBase"]], "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging": [[131, 1, 1, "", 
"InsertLogging"]], "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat": [[132, 1, 1, "", "RerangeQuantizedConcat"]], "neural_compressor.adaptor.tf_utils.util": [[133, 2, 1, "", "apply_inlining"], [133, 2, 1, "", "collate_tf_preds"], [133, 2, 1, "", "construct_function_from_graph_def"], [133, 2, 1, "", "disable_random"], [133, 2, 1, "", "fix_ref_type_of_graph_def"], [133, 2, 1, "", "generate_feed_dict"], [133, 2, 1, "", "get_estimator_graph"], [133, 2, 1, "", "get_graph_def"], [133, 2, 1, "", "get_input_output_node_names"], [133, 2, 1, "", "get_model_input_shape"], [133, 2, 1, "", "get_tensor_by_name"], [133, 2, 1, "", "get_tensor_val_from_graph_node"], [133, 2, 1, "", "get_weight_from_input_tensor"], [133, 2, 1, "", "int8_node_name_reverse"], [133, 2, 1, "", "is_ckpt_format"], [133, 2, 1, "", "is_saved_model_format"], [133, 2, 1, "", "iterator_sess_run"], [133, 2, 1, "", "parse_saved_model"], [133, 2, 1, "", "read_graph"], [133, 2, 1, "", "reconstruct_saved_model"], [133, 2, 1, "", "strip_equivalent_nodes"], [133, 2, 1, "", "strip_unused_nodes"], [133, 2, 1, "", "version1_eq_version2"], [133, 2, 1, "", "version1_gt_version2"], [133, 2, 1, "", "version1_gte_version2"], [133, 2, 1, "", "version1_lt_version2"], [133, 2, 1, "", "version1_lte_version2"], [133, 2, 1, "", "write_graph"]], "neural_compressor.adaptor.torch_utils": [[134, 0, 0, "-", "bf16_convert"], [135, 0, 0, "-", "hawq_metric"], [137, 0, 0, "-", "layer_wise_quant"], [142, 0, 0, "-", "model_wrapper"], [143, 0, 0, "-", "pattern_detector"], [144, 0, 0, "-", "symbolic_trace"], [145, 0, 0, "-", "util"]], "neural_compressor.adaptor.torch_utils.bf16_convert": [[134, 1, 1, "", "BF16ModuleWrapper"], [134, 2, 1, "", "Convert"]], "neural_compressor.adaptor.torch_utils.hawq_metric": [[135, 1, 1, "", "HessianTrace"], [135, 1, 1, "", "Node_collector"], [135, 2, 1, "", "compare_weights"], [135, 2, 1, "", "hawq_top"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant": [[138, 0, 0, "-", "modified_pickle"], [139, 0, 0, "-", "quantize"], [140, 0, 0, "-", "torch_load"], [141, 0, 0, "-", "utils"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle": [[138, 3, 1, "", "PickleError"], [138, 3, 1, "", "PicklingError"], [138, 3, 1, "", "UnpicklingError"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize": [[139, 1, 1, "", "LayerWiseQuant"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load": [[140, 2, 1, "", "load"]], "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils": [[141, 2, 1, "", "dowload_hf_model"], [141, 2, 1, "", "get_children"], [141, 2, 1, "", "get_module"], [141, 2, 1, "", "get_named_children"], [141, 2, 1, "", "get_super_module_by_name"], [141, 2, 1, "", "load_empty_model"], [141, 2, 1, "", "load_layer_wise_quantized_model"], [141, 2, 1, "", "load_tensor"], [141, 2, 1, "", "load_tensor_from_shard"], [141, 2, 1, "", "update_module"]], "neural_compressor.adaptor.torch_utils.model_wrapper": [[142, 1, 1, "", "FakeAffineTensorQuantFunction"], [142, 1, 1, "", "MulLinear"], [142, 1, 1, "", "TEQLinearFakeQuant"]], "neural_compressor.adaptor.torch_utils.pattern_detector": [[143, 1, 1, "", "TransformerBasedModelBlockPatternDetector"]], "neural_compressor.adaptor.torch_utils.symbolic_trace": [[144, 2, 1, "", "symbolic_trace"], [144, 2, 1, "", "trace_and_fuse_sub_graph"]], "neural_compressor.adaptor.torch_utils.util": [[145, 2, 1, "", "append_attr"], [145, 2, 1, "", "auto_copy"], [145, 2, 1, "", "calculate_quant_min_max"], [145, 2, 1, "", 
"calibration"], [145, 2, 1, "", "check_cfg_and_qconfig"], [145, 2, 1, "", "collate_torch_preds"], [145, 2, 1, "", "collect_weight_info"], [145, 2, 1, "", "fetch_module"], [145, 2, 1, "", "forward_wrapper"], [145, 2, 1, "", "generate_activation_observer"], [145, 2, 1, "", "get_absorb_layers"], [145, 2, 1, "", "get_block_prefix"], [145, 2, 1, "", "get_depth"], [145, 2, 1, "", "get_dict_at_depth"], [145, 2, 1, "", "get_element_under_depth"], [145, 2, 1, "", "get_embedding_contiguous"], [145, 2, 1, "", "get_example_input"], [145, 2, 1, "", "get_fallback_order"], [145, 2, 1, "", "get_hidden_states"], [145, 2, 1, "", "get_module_input_output"], [145, 2, 1, "", "get_mse_order_per_fp32"], [145, 2, 1, "", "get_mse_order_per_int8"], [145, 2, 1, "", "get_op_type_by_name"], [145, 2, 1, "", "get_quantizable_ops_from_cfgs"], [145, 2, 1, "", "get_torch_version"], [145, 2, 1, "", "input2tuple"], [145, 2, 1, "", "is_fused_module"], [145, 2, 1, "", "match_datatype_pattern"], [145, 2, 1, "", "move_input_device"], [145, 2, 1, "", "paser_cfgs"], [145, 2, 1, "", "set_module"], [145, 2, 1, "", "simple_inference"], [145, 2, 1, "", "update_sq_scale"]], "neural_compressor.algorithm": [[146, 0, 0, "-", "algorithm"], [147, 0, 0, "-", "fast_bias_correction"], [149, 0, 0, "-", "smooth_quant"], [150, 0, 0, "-", "weight_correction"]], "neural_compressor.algorithm.algorithm": [[146, 1, 1, "", "ALGORITHMS"], [146, 1, 1, "", "Algorithm"], [146, 1, 1, "", "AlgorithmScheduler"], [146, 2, 1, "", "algorithm_registry"]], "neural_compressor.algorithm.fast_bias_correction": [[147, 1, 1, "", "FastBiasCorrection"]], "neural_compressor.algorithm.smooth_quant": [[149, 1, 1, "", "SmoothQuant"]], "neural_compressor.algorithm.weight_correction": [[150, 1, 1, "", "WeightCorrection"]], "neural_compressor.benchmark": [[151, 2, 1, "", "benchmark_with_raw_cmd"], [151, 2, 1, "", "call_one"], [151, 2, 1, "", "config_instance"], [151, 2, 1, "", "fit"], [151, 2, 1, "", "generate_prefix"], [151, 2, 1, "", "get_architecture"], [151, 2, 1, "", "get_bounded_threads"], [151, 2, 1, "", "get_core_ids"], [151, 2, 1, "", "get_physical_ids"], [151, 2, 1, "", "get_threads"], [151, 2, 1, "", "get_threads_per_core"], [151, 2, 1, "", "profile"], [151, 2, 1, "", "run_instance"], [151, 2, 1, "", "set_all_env_var"], [151, 2, 1, "", "set_env_var"], [151, 2, 1, "", "summary_benchmark"]], "neural_compressor.common": [[152, 0, 0, "-", "base_config"], [153, 0, 0, "-", "base_tuning"], [154, 0, 0, "-", "benchmark"], [156, 0, 0, "-", "tuning_param"], [158, 0, 0, "-", "utils"]], "neural_compressor.common.base_config": [[152, 1, 1, "", "BaseConfig"], [152, 1, 1, "", "ComposableConfig"], [152, 1, 1, "", "ConfigRegistry"], [152, 2, 1, "", "get_all_config_set_from_config_registry"], [152, 2, 1, "", "register_config"], [152, 2, 1, "", "register_supported_configs_for_fwk"]], "neural_compressor.common.base_config.BaseConfig": [[152, 4, 1, "", "name"], [152, 4, 1, "", "params_list"]], "neural_compressor.common.base_config.ComposableConfig": [[152, 4, 1, "", "config_list"]], "neural_compressor.common.base_tuning": [[153, 1, 1, "", "ConfigLoader"], [153, 1, 1, "", "ConfigSet"], [153, 1, 1, "", "EvaluationFuncWrapper"], [153, 1, 1, "", "Evaluator"], [153, 1, 1, "", "Sampler"], [153, 1, 1, "", "SequentialSampler"], [153, 1, 1, "", "TuningConfig"], [153, 1, 1, "", "TuningMonitor"], [153, 2, 1, "", "init_tuning"]], "neural_compressor.common.base_tuning.ConfigSet": [[153, 4, 1, "", "config_list"]], "neural_compressor.common.benchmark": [[154, 2, 1, "", "benchmark"], [154, 2, 1, "", 
"dump_numa_info"], [154, 2, 1, "", "format_list2str"], [154, 2, 1, "", "generate_prefix"], [154, 2, 1, "", "get_linux_numa_info"], [154, 2, 1, "", "get_numa_node"], [154, 2, 1, "", "get_reversed_numa_info"], [154, 2, 1, "", "get_windows_numa_info"], [154, 2, 1, "", "parse_str2list"], [154, 2, 1, "", "run_multi_instance_command"], [154, 2, 1, "", "set_cores_for_instance"], [154, 2, 1, "", "summary_latency_throughput"]], "neural_compressor.common.tuning_param": [[156, 1, 1, "", "ParamLevel"], [156, 1, 1, "", "TuningParam"]], "neural_compressor.common.tuning_param.ParamLevel": [[156, 4, 1, "", "MODEL_LEVEL"], [156, 4, 1, "", "OP_LEVEL"], [156, 4, 1, "", "OP_TYPE_LEVEL"]], "neural_compressor.common.utils": [[157, 0, 0, "-", "constants"], [159, 0, 0, "-", "logger"], [160, 0, 0, "-", "save_load"], [161, 0, 0, "-", "utility"]], "neural_compressor.common.utils.constants": [[157, 1, 1, "", "Mode"]], "neural_compressor.common.utils.logger": [[159, 1, 1, "", "Logger"], [159, 1, 1, "", "TuningLogger"]], "neural_compressor.common.utils.save_load": [[160, 2, 1, "", "load_config_mapping"], [160, 2, 1, "", "save_config_mapping"]], "neural_compressor.common.utils.utility": [[161, 1, 1, "", "CpuInfo"], [161, 1, 1, "", "LazyImport"], [161, 1, 1, "", "ProcessorType"], [161, 1, 1, "", "Statistics"], [161, 2, 1, "", "call_counter"], [161, 2, 1, "", "detect_processor_type_based_on_hw"], [161, 2, 1, "", "dump_elapsed_time"], [161, 2, 1, "", "get_workspace"], [161, 2, 1, "", "log_process"], [161, 2, 1, "", "set_random_seed"], [161, 2, 1, "", "set_resume_from"], [161, 2, 1, "", "set_tensorboard"], [161, 2, 1, "", "set_workspace"], [161, 2, 1, "", "singleton"]], "neural_compressor.compression": [[162, 0, 0, "-", "callbacks"], [164, 0, 0, "-", "distillation"], [167, 0, 0, "-", "hpo"], [170, 0, 0, "-", "pruner"]], "neural_compressor.compression.callbacks": [[162, 1, 1, "", "BaseCallbacks"], [162, 1, 1, "", "DistillationCallbacks"], [162, 1, 1, "", "PruningCallbacks"], [162, 1, 1, "", "QuantizationAwareTrainingCallbacks"]], "neural_compressor.compression.callbacks.DistillationCallbacks": [[162, 4, 1, "", "_epoch_ran"], [162, 4, 1, "", "best_model"], [162, 4, 1, "", "best_score"], [162, 4, 1, "", "eval_frequency"]], "neural_compressor.compression.distillation": [[163, 0, 0, "-", "criterions"], [165, 0, 0, "-", "optimizers"], [166, 0, 0, "-", "utility"]], "neural_compressor.compression.distillation.criterions": [[163, 1, 1, "", "Criterions"], [163, 1, 1, "", "IntermediateLayersKnowledgeDistillationLoss"], [163, 1, 1, "", "KnowledgeDistillationFramework"], [163, 1, 1, "", "KnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchCriterions"], [163, 1, 1, "", "PyTorchCrossEntropyLoss"], [163, 1, 1, "", "PyTorchIntermediateLayersKnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchIntermediateLayersKnowledgeDistillationLossWrapper"], [163, 1, 1, "", "PyTorchKnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchKnowledgeDistillationLossWrapper"], [163, 1, 1, "", "PyTorchSelfKnowledgeDistillationLoss"], [163, 1, 1, "", "PyTorchSelfKnowledgeDistillationLossWrapper"], [163, 1, 1, "", "SelfKnowledgeDistillationLoss"], [163, 1, 1, "", "TensorFlowCrossEntropyLoss"], [163, 1, 1, "", "TensorFlowSparseCategoricalCrossentropy"], [163, 1, 1, "", "TensorflowCriterions"], [163, 1, 1, "", "TensorflowKnowledgeDistillationLoss"], [163, 1, 1, "", "TensorflowKnowledgeDistillationLossExternal"], [163, 1, 1, "", "TensorflowKnowledgeDistillationLossWrapper"], [163, 2, 1, "", "criterion_registry"]], 
"neural_compressor.compression.distillation.optimizers": [[165, 1, 1, "", "Optimizers"], [165, 1, 1, "", "PyTorchOptimizers"], [165, 1, 1, "", "PyTorchSGD"], [165, 1, 1, "", "TensorFlowAdam"], [165, 1, 1, "", "TensorFlowAdamW"], [165, 1, 1, "", "TensorFlowSGD"], [165, 1, 1, "", "TensorflowOptimizers"], [165, 2, 1, "", "optimizer_registry"]], "neural_compressor.compression.distillation.utility": [[166, 2, 1, "", "get_activation"], [166, 2, 1, "", "record_output"]], "neural_compressor.compression.hpo": [[168, 0, 0, "-", "sa_optimizer"]], "neural_compressor.compression.pruner": [[169, 0, 0, "-", "criteria"], [172, 0, 0, "-", "model_slim"], [176, 0, 0, "-", "patterns"], [170, 2, 1, "", "prepare_pruning"], [183, 0, 0, "-", "pruners"], [188, 0, 0, "-", "pruning"], [189, 0, 0, "-", "regs"], [170, 2, 1, "", "save"], [190, 0, 0, "-", "schedulers"], [191, 0, 0, "-", "tf_criteria"], [192, 0, 0, "-", "utils"], [193, 0, 0, "-", "wanda"]], "neural_compressor.compression.pruner.criteria": [[169, 1, 1, "", "BlockMaskCriterion"], [169, 1, 1, "", "GradientCriterion"], [169, 1, 1, "", "MagnitudeCriterion"], [169, 1, 1, "", "PruningCriterion"], [169, 1, 1, "", "RetrainFreeCriterion"], [169, 1, 1, "", "SnipCriterion"], [169, 1, 1, "", "SnipMomentumCriterion"], [169, 2, 1, "", "get_criterion"], [169, 2, 1, "", "register_criterion"]], "neural_compressor.compression.pruner.criteria.BlockMaskCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.GradientCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.MagnitudeCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.PruningCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.RetrainFreeCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.SnipCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.criteria.SnipMomentumCriterion": [[169, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.model_slim": [[171, 0, 0, "-", "auto_slim"], [173, 0, 0, "-", "pattern_analyzer"], [174, 0, 0, "-", "weight_slim"]], "neural_compressor.compression.pruner.model_slim.auto_slim": [[171, 2, 1, "", "generate_ffn2_pruning_config"], [171, 2, 1, "", "generate_mha_pruning_config"], [171, 2, 1, "", "model_slim"], [171, 2, 1, "", "model_slim_ffn2"], [171, 2, 1, "", "model_slim_mha"], [171, 2, 1, "", "parse_auto_slim_config"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer": [[173, 1, 1, "", "ClassifierHeadSearcher"], [173, 1, 1, "", "ClassifierHeadSearcherTF"], [173, 1, 1, "", "JitBasicSearcher"], [173, 1, 1, "", "Linear2LinearSearcher"], [173, 1, 1, "", "RecipeSearcher"], [173, 1, 1, "", "SelfMHASearcher"], [173, 2, 1, "", "get_attributes"], [173, 2, 1, "", "get_common_module"], [173, 2, 1, "", "print_iterables"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcher": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "static_graph"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.ClassifierHeadSearcherTF": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "static_graph"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.JitBasicSearcher": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "searching_results"], [173, 
4, 1, "", "static_graph"], [173, 4, 1, "", "target_layers"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.Linear2LinearSearcher": [[173, 4, 1, "", "current_pattern"], [173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "searching_results"], [173, 4, 1, "", "static_graph"], [173, 4, 1, "", "target_layers"], [173, 4, 1, "", "target_op_lut"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.RecipeSearcher": [[173, 4, 1, "", "model"], [173, 4, 1, "", "recipe"], [173, 4, 1, "", "searching_results"], [173, 4, 1, "", "targets"]], "neural_compressor.compression.pruner.model_slim.pattern_analyzer.SelfMHASearcher": [[173, 4, 1, "", "device"], [173, 4, 1, "", "flatten_static_graph"], [173, 4, 1, "", "model"], [173, 4, 1, "", "static_graph"]], "neural_compressor.compression.pruner.model_slim.weight_slim": [[174, 1, 1, "", "LinearCompression"], [174, 1, 1, "", "LinearCompressionIterator"], [174, 1, 1, "", "PostCompressionUtils"]], "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompression": [[174, 4, 1, "", "device"], [174, 4, 1, "", "layer_1"], [174, 4, 1, "", "layer_2"]], "neural_compressor.compression.pruner.model_slim.weight_slim.LinearCompressionIterator": [[174, 4, 1, "", "linear_patterns"]], "neural_compressor.compression.pruner.patterns": [[175, 0, 0, "-", "base"], [176, 2, 1, "", "get_pattern"], [177, 0, 0, "-", "mha"], [178, 0, 0, "-", "ninm"], [179, 0, 0, "-", "nxm"]], "neural_compressor.compression.pruner.patterns.base": [[175, 1, 1, "", "BasePattern"], [175, 1, 1, "", "KerasBasePattern"], [175, 1, 1, "", "PytorchBasePattern"], [175, 2, 1, "", "register_pattern"]], "neural_compressor.compression.pruner.patterns.base.BasePattern": [[175, 4, 1, "", "config"], [175, 4, 1, "", "invalid_layers"], [175, 4, 1, "", "is_global"], [175, 4, 1, "", "keep_mask_layers"], [175, 4, 1, "", "max_sparsity_ratio_per_op"], [175, 4, 1, "", "min_sparsity_ratio_per_op"], [175, 4, 1, "", "modules"], [175, 4, 1, "", "pattern"], [175, 4, 1, "", "target_sparsity"]], "neural_compressor.compression.pruner.patterns.base.KerasBasePattern": [[175, 4, 1, "", "config"], [175, 4, 1, "", "invalid_layers"], [175, 4, 1, "", "is_global"], [175, 4, 1, "", "keep_mask_layers"], [175, 4, 1, "", "max_sparsity_ratio_per_op"], [175, 4, 1, "", "min_sparsity_ratio_per_op"], [175, 4, 1, "", "modules"], [175, 4, 1, "", "pattern"], [175, 4, 1, "", "target_sparsity"]], "neural_compressor.compression.pruner.patterns.base.PytorchBasePattern": [[175, 4, 1, "", "config"], [175, 4, 1, "", "invalid_layers"], [175, 4, 1, "", "is_global"], [175, 4, 1, "", "keep_mask_layers"], [175, 4, 1, "", "max_sparsity_ratio_per_op"], [175, 4, 1, "", "min_sparsity_ratio_per_op"], [175, 4, 1, "", "modules"], [175, 4, 1, "", "pattern"], [175, 4, 1, "", "target_sparsity"]], "neural_compressor.compression.pruner.patterns.mha": [[177, 1, 1, "", "PatternMHA"]], "neural_compressor.compression.pruner.patterns.mha.PatternMHA": [[177, 4, 1, "", "M"], [177, 4, 1, "", "N"]], "neural_compressor.compression.pruner.patterns.ninm": [[178, 1, 1, "", "PytorchPatternNInM"]], "neural_compressor.compression.pruner.patterns.ninm.PytorchPatternNInM": [[178, 4, 1, "", "M"], [178, 4, 1, "", "N"]], "neural_compressor.compression.pruner.patterns.nxm": [[179, 1, 1, "", "KerasPatternNxM"], [179, 1, 1, "", "PytorchPatternNxM"]], "neural_compressor.compression.pruner.patterns.nxm.KerasPatternNxM": [[179, 4, 1, "", "block_size"]], 
"neural_compressor.compression.pruner.patterns.nxm.PytorchPatternNxM": [[179, 4, 1, "", "block_size"]], "neural_compressor.compression.pruner.pruners": [[180, 0, 0, "-", "base"], [181, 0, 0, "-", "basic"], [182, 0, 0, "-", "block_mask"], [183, 2, 1, "", "get_pruner"], [184, 0, 0, "-", "mha"], [183, 2, 1, "", "parse_valid_pruner_types"], [185, 0, 0, "-", "pattern_lock"], [186, 0, 0, "-", "progressive"], [187, 0, 0, "-", "retrain_free"]], "neural_compressor.compression.pruner.pruners.base": [[180, 1, 1, "", "BasePruner"], [180, 1, 1, "", "KerasBasePruner"], [180, 1, 1, "", "PytorchBasePruner"], [180, 2, 1, "", "register_pruner"]], "neural_compressor.compression.pruner.pruners.base.BasePruner": [[180, 4, 1, "", "config"], [180, 4, 1, "", "current_sparsity_ratio"], [180, 4, 1, "", "end_step"], [180, 4, 1, "", "global_step"], [180, 4, 1, "", "masks"], [180, 4, 1, "", "max_sparsity_ratio_per_op"], [180, 4, 1, "", "modules"], [180, 4, 1, "", "pattern"], [180, 4, 1, "", "pruning_frequency"], [180, 4, 1, "", "scheduler"], [180, 4, 1, "", "scores"], [180, 4, 1, "", "start_step"], [180, 4, 1, "", "target_sparsity_ratio"]], "neural_compressor.compression.pruner.pruners.base.KerasBasePruner": [[180, 4, 1, "", "config"], [180, 4, 1, "", "current_sparsity_ratio"], [180, 4, 1, "", "end_step"], [180, 4, 1, "", "global_step"], [180, 4, 1, "", "masks"], [180, 4, 1, "", "max_sparsity_ratio_per_op"], [180, 4, 1, "", "modules"], [180, 4, 1, "", "pattern"], [180, 4, 1, "", "pruning_frequency"], [180, 4, 1, "", "scheduler"], [180, 4, 1, "", "scores"], [180, 4, 1, "", "start_step"], [180, 4, 1, "", "target_sparsity_ratio"]], "neural_compressor.compression.pruner.pruners.base.PytorchBasePruner": [[180, 4, 1, "", "config"], [180, 4, 1, "", "current_sparsity_ratio"], [180, 4, 1, "", "end_step"], [180, 4, 1, "", "global_step"], [180, 4, 1, "", "masks"], [180, 4, 1, "", "max_sparsity_ratio_per_op"], [180, 4, 1, "", "modules"], [180, 4, 1, "", "pattern"], [180, 4, 1, "", "pruning_frequency"], [180, 4, 1, "", "scheduler"], [180, 4, 1, "", "scores"], [180, 4, 1, "", "start_step"], [180, 4, 1, "", "target_sparsity_ratio"]], "neural_compressor.compression.pruner.pruners.basic": [[181, 1, 1, "", "KerasBasicPruner"], [181, 1, 1, "", "PytorchBasicPruner"]], "neural_compressor.compression.pruner.pruners.basic.KerasBasicPruner": [[181, 4, 1, "", "criterion"], [181, 4, 1, "", "pattern"], [181, 4, 1, "", "reg"], [181, 4, 1, "", "scheduler"]], "neural_compressor.compression.pruner.pruners.basic.PytorchBasicPruner": [[181, 4, 1, "", "criterion"], [181, 4, 1, "", "pattern"], [181, 4, 1, "", "reg"], [181, 4, 1, "", "scheduler"]], "neural_compressor.compression.pruner.pruners.block_mask": [[182, 1, 1, "", "PytorchBlockMaskPruner"]], "neural_compressor.compression.pruner.pruners.block_mask.PytorchBlockMaskPruner": [[182, 4, 1, "", "criterion"], [182, 4, 1, "", "pattern"], [182, 4, 1, "", "reg"], [182, 4, 1, "", "scheduler"]], "neural_compressor.compression.pruner.pruners.mha": [[184, 1, 1, "", "PythonMultiheadAttentionPruner"]], "neural_compressor.compression.pruner.pruners.mha.PythonMultiheadAttentionPruner": [[184, 4, 1, "", "head_masks"], [184, 4, 1, "", "linear_layers"], [184, 4, 1, "", "mha_compressions"], [184, 4, 1, "", "mha_scores"]], "neural_compressor.compression.pruner.pruners.pattern_lock": [[185, 1, 1, "", "PytorchPatternLockPruner"]], "neural_compressor.compression.pruner.pruners.progressive": [[186, 1, 1, "", "PytorchProgressivePruner"]], "neural_compressor.compression.pruner.pruners.retrain_free": [[187, 1, 1, "", 
"PytorchRetrainFreePruner"]], "neural_compressor.compression.pruner.pruners.retrain_free.PytorchRetrainFreePruner": [[187, 4, 1, "", "criterion"], [187, 4, 1, "", "pattern"], [187, 4, 1, "", "reg"], [187, 4, 1, "", "scheduler"]], "neural_compressor.compression.pruner.pruning": [[188, 1, 1, "", "BasePruning"], [188, 1, 1, "", "BasicPruning"], [188, 1, 1, "", "RetrainFreePruning"], [188, 1, 1, "", "SparseGPTPruning"], [188, 2, 1, "", "register_pruning"]], "neural_compressor.compression.pruner.pruning.BasePruning": [[188, 4, 1, "", "config_file_path"], [188, 4, 1, "", "model"], [188, 4, 1, "", "pruner_info"], [188, 4, 1, "", "pruners"]], "neural_compressor.compression.pruner.pruning.BasicPruning": [[188, 4, 1, "", "config_file_path"], [188, 4, 1, "", "model"], [188, 4, 1, "", "pruner_info"], [188, 4, 1, "", "pruners"]], "neural_compressor.compression.pruner.pruning.RetrainFreePruning": [[188, 4, 1, "", "config_file_path"], [188, 4, 1, "", "model"], [188, 4, 1, "", "pruner_info"], [188, 4, 1, "", "pruners"]], "neural_compressor.compression.pruner.regs": [[189, 1, 1, "", "BaseReg"], [189, 1, 1, "", "GroupLasso"], [189, 2, 1, "", "get_reg"], [189, 2, 1, "", "get_reg_type"], [189, 2, 1, "", "register_reg"]], "neural_compressor.compression.pruner.regs.GroupLasso": [[189, 4, 1, "", "alpha"], [189, 4, 1, "", "reg_terms"]], "neural_compressor.compression.pruner.schedulers": [[190, 1, 1, "", "IterativeScheduler"], [190, 1, 1, "", "OneshotScheduler"], [190, 1, 1, "", "PruningScheduler"], [190, 2, 1, "", "get_scheduler"], [190, 2, 1, "", "register_scheduler"]], "neural_compressor.compression.pruner.schedulers.PruningScheduler": [[190, 4, 1, "", "config"]], "neural_compressor.compression.pruner.tf_criteria": [[191, 1, 1, "", "MagnitudeCriterion"], [191, 1, 1, "", "PruningCriterion"], [191, 2, 1, "", "get_tf_criterion"], [191, 2, 1, "", "register_criterion"]], "neural_compressor.compression.pruner.tf_criteria.MagnitudeCriterion": [[191, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.tf_criteria.PruningCriterion": [[191, 4, 1, "", "scores"]], "neural_compressor.compression.pruner.utils": [[192, 2, 1, "", "check_config"], [192, 2, 1, "", "check_key_validity"], [192, 2, 1, "", "collect_layer_inputs"], [192, 2, 1, "", "get_layers"], [192, 2, 1, "", "get_sparsity_ratio"], [192, 2, 1, "", "get_sparsity_ratio_tf"], [192, 2, 1, "", "parse_last_linear"], [192, 2, 1, "", "parse_last_linear_tf"], [192, 2, 1, "", "parse_to_prune"], [192, 2, 1, "", "parse_to_prune_tf"], [192, 2, 1, "", "process_and_check_config"], [192, 2, 1, "", "process_config"], [192, 2, 1, "", "process_weight_config"], [192, 2, 1, "", "process_yaml_config"], [192, 2, 1, "", "reset_none_to_default"], [192, 2, 1, "", "update_params"]], "neural_compressor.compression.pruner.wanda": [[194, 0, 0, "-", "utils"]], "neural_compressor.compression.pruner.wanda.utils": [[194, 2, 1, "", "find_layers"]], "neural_compressor.config": [[195, 1, 1, "", "AccuracyCriterion"], [195, 1, 1, "", "BenchmarkConfig"], [195, 1, 1, "", "DistillationConfig"], [195, 1, 1, "", "DotDict"], [195, 1, 1, "", "ExportConfig"], [195, 1, 1, "", "HPOConfig"], [195, 1, 1, "", "IntermediateLayersKnowledgeDistillationLossConfig"], [195, 1, 1, "", "Keras"], [195, 1, 1, "", "KnowledgeDistillationLossConfig"], [195, 1, 1, "", "MXNet"], [195, 1, 1, "", "MixedPrecisionConfig"], [195, 1, 1, "", "NASConfig"], [195, 1, 1, "", "ONNX"], [195, 1, 1, "", "ONNXQlinear2QDQConfig"], [195, 1, 1, "", "Options"], [195, 1, 1, "", "PostTrainingQuantConfig"], [195, 1, 1, "", "PyTorch"], [195, 1, 
1, "", "QuantizationAwareTrainingConfig"], [195, 1, 1, "", "SelfKnowledgeDistillationLossConfig"], [195, 1, 1, "", "TF2ONNXConfig"], [195, 1, 1, "", "TensorFlow"], [195, 1, 1, "", "Torch2ONNXConfig"], [195, 1, 1, "", "TuningCriterion"], [195, 1, 1, "", "WeightPruningConfig"]], "neural_compressor.contrib": [[197, 0, 0, "-", "strategy"]], "neural_compressor.contrib.strategy": [[198, 0, 0, "-", "sigopt"], [199, 0, 0, "-", "tpe"]], "neural_compressor.contrib.strategy.sigopt": [[198, 1, 1, "", "SigOptTuneStrategy"]], "neural_compressor.contrib.strategy.tpe": [[199, 1, 1, "", "TpeTuneStrategy"]], "neural_compressor.data": [[215, 0, 0, "-", "datasets"], [219, 0, 0, "-", "filters"], [222, 0, 0, "-", "transforms"]], "neural_compressor.data.dataloaders": [[200, 0, 0, "-", "base_dataloader"], [201, 0, 0, "-", "dataloader"], [202, 0, 0, "-", "default_dataloader"], [203, 0, 0, "-", "fetcher"], [204, 0, 0, "-", "mxnet_dataloader"], [205, 0, 0, "-", "onnxrt_dataloader"], [206, 0, 0, "-", "pytorch_dataloader"], [207, 0, 0, "-", "sampler"], [208, 0, 0, "-", "tensorflow_dataloader"]], "neural_compressor.data.dataloaders.base_dataloader": [[200, 1, 1, "", "BaseDataLoader"]], "neural_compressor.data.dataloaders.dataloader": [[201, 1, 1, "", "DataLoader"], [201, 2, 1, "", "check_dataloader"]], "neural_compressor.data.dataloaders.default_dataloader": [[202, 1, 1, "", "DefaultDataLoader"], [202, 2, 1, "", "default_collate"]], "neural_compressor.data.dataloaders.fetcher": [[203, 1, 1, "", "Fetcher"], [203, 1, 1, "", "IndexFetcher"], [203, 1, 1, "", "IterableFetcher"]], "neural_compressor.data.dataloaders.mxnet_dataloader": [[204, 1, 1, "", "MXNetDataLoader"]], "neural_compressor.data.dataloaders.onnxrt_dataloader": [[205, 1, 1, "", "ONNXRTBertDataLoader"], [205, 1, 1, "", "ONNXRTDataLoader"]], "neural_compressor.data.dataloaders.pytorch_dataloader": [[206, 1, 1, "", "PyTorchDataLoader"]], "neural_compressor.data.dataloaders.sampler": [[207, 1, 1, "", "BatchSampler"], [207, 1, 1, "", "IterableSampler"], [207, 1, 1, "", "Sampler"], [207, 1, 1, "", "SequentialSampler"]], "neural_compressor.data.dataloaders.tensorflow_dataloader": [[208, 1, 1, "", "TFDataDataLoader"], [208, 1, 1, "", "TensorflowBertDataLoader"], [208, 1, 1, "", "TensorflowDataLoader"], [208, 1, 1, "", "TensorflowModelZooBertDataLoader"]], "neural_compressor.data.datasets": [[209, 0, 0, "-", "bert_dataset"], [210, 0, 0, "-", "coco_dataset"], [211, 0, 0, "-", "dataset"], [212, 0, 0, "-", "dummy_dataset"], [213, 0, 0, "-", "dummy_dataset_v2"], [214, 0, 0, "-", "imagenet_dataset"], [216, 0, 0, "-", "style_transfer_dataset"]], "neural_compressor.data.datasets.bert_dataset": [[209, 1, 1, "", "InputFeatures"], [209, 1, 1, "", "ONNXRTBertDataset"], [209, 1, 1, "", "ParseDecodeBert"], [209, 1, 1, "", "PytorchBertDataset"], [209, 1, 1, "", "TensorflowBertDataset"], [209, 1, 1, "", "TensorflowModelZooBertDataset"], [209, 2, 1, "", "convert_examples_to_features"], [209, 2, 1, "", "load_and_cache_examples"]], "neural_compressor.data.datasets.coco_dataset": [[210, 1, 1, "", "COCONpy"], [210, 1, 1, "", "COCORaw"], [210, 1, 1, "", "COCORecordDataset"], [210, 1, 1, "", "ParseDecodeCoco"]], "neural_compressor.data.datasets.dataset": [[211, 1, 1, "", "CIFAR10"], [211, 1, 1, "", "CIFAR100"], [211, 1, 1, "", "Dataset"], [211, 1, 1, "", "Datasets"], [211, 1, 1, "", "FashionMNIST"], [211, 1, 1, "", "ImageFolder"], [211, 1, 1, "", "IterableDataset"], [211, 1, 1, "", "MNIST"], [211, 1, 1, "", "MXNetCIFAR10"], [211, 1, 1, "", "MXNetCIFAR100"], [211, 1, 1, "", 
"MXNetDatasets"], [211, 1, 1, "", "MXNetFashionMNIST"], [211, 1, 1, "", "MXNetImageFolder"], [211, 1, 1, "", "MXNetMNIST"], [211, 1, 1, "", "ONNXRTITDatasets"], [211, 1, 1, "", "ONNXRTQLDatasets"], [211, 1, 1, "", "PyTorchDatasets"], [211, 1, 1, "", "PytorchCIFAR10"], [211, 1, 1, "", "PytorchCIFAR100"], [211, 1, 1, "", "PytorchFashionMNIST"], [211, 1, 1, "", "PytorchMNIST"], [211, 1, 1, "", "PytorchMxnetWrapDataset"], [211, 1, 1, "", "PytorchMxnetWrapFunction"], [211, 1, 1, "", "Tensorflow"], [211, 1, 1, "", "TensorflowCIFAR10"], [211, 1, 1, "", "TensorflowCIFAR100"], [211, 1, 1, "", "TensorflowDatasets"], [211, 1, 1, "", "TensorflowFashionMNIST"], [211, 1, 1, "", "TensorflowImageRecord"], [211, 1, 1, "", "TensorflowMNIST"], [211, 1, 1, "", "TensorflowTFRecordDataset"], [211, 1, 1, "", "TensorflowVOCRecord"], [211, 2, 1, "", "calculate_md5"], [211, 2, 1, "", "check_integrity"], [211, 2, 1, "", "dataset_registry"], [211, 2, 1, "", "download_url"], [211, 5, 1, "", "framework_datasets"], [211, 2, 1, "", "gen_bar_updater"]], "neural_compressor.data.datasets.dummy_dataset": [[212, 1, 1, "", "DummyDataset"]], "neural_compressor.data.datasets.dummy_dataset_v2": [[213, 1, 1, "", "DummyDataset"], [213, 1, 1, "", "SparseDummyDataset"]], "neural_compressor.data.datasets.imagenet_dataset": [[214, 1, 1, "", "ImagenetRaw"], [214, 1, 1, "", "MXNetImagenetRaw"], [214, 1, 1, "", "ONNXRTImagenetDataset"], [214, 1, 1, "", "PytorchImagenetRaw"], [214, 1, 1, "", "TensorflowImagenetDataset"], [214, 1, 1, "", "TensorflowImagenetRaw"]], "neural_compressor.data.datasets.style_transfer_dataset": [[216, 1, 1, "", "StyleTransferDataset"]], "neural_compressor.data.filters": [[217, 0, 0, "-", "coco_filter"], [218, 0, 0, "-", "filter"]], "neural_compressor.data.filters.coco_filter": [[217, 1, 1, "", "LabelBalanceCOCORawFilter"], [217, 1, 1, "", "LabelBalanceCOCORecordFilter"]], "neural_compressor.data.filters.filter": [[218, 1, 1, "", "FILTERS"], [218, 1, 1, "", "Filter"], [218, 1, 1, "", "MXNetFilters"], [218, 1, 1, "", "ONNXRTITFilters"], [218, 1, 1, "", "ONNXRTQLFilters"], [218, 1, 1, "", "PyTorchFilters"], [218, 1, 1, "", "TensorflowFilters"], [218, 2, 1, "", "filter_registry"]], "neural_compressor.data.transforms": [[221, 0, 0, "-", "imagenet_transform"], [223, 0, 0, "-", "postprocess"], [224, 0, 0, "-", "tokenization"], [225, 0, 0, "-", "transform"]], "neural_compressor.data.transforms.imagenet_transform": [[221, 1, 1, "", "BilinearImagenetTransform"], [221, 1, 1, "", "LabelShift"], [221, 1, 1, "", "ONNXResizeCropImagenetTransform"], [221, 1, 1, "", "OnnxBilinearImagenetTransform"], [221, 1, 1, "", "ParseDecodeImagenet"], [221, 1, 1, "", "ParseDecodeImagenetTransform"], [221, 1, 1, "", "QuantizedInput"], [221, 1, 1, "", "ResizeWithAspectRatio"], [221, 1, 1, "", "TensorflowResizeCropImagenetTransform"], [221, 1, 1, "", "TensorflowShiftRescale"], [221, 1, 1, "", "TensorflowTransposeLastChannel"]], "neural_compressor.data.transforms.postprocess": [[223, 1, 1, "", "Postprocess"]], "neural_compressor.data.transforms.tokenization": [[224, 1, 1, "", "BasicTokenizer"], [224, 1, 1, "", "FullTokenizer"], [224, 1, 1, "", "WordpieceTokenizer"], [224, 2, 1, "", "convert_by_vocab"], [224, 2, 1, "", "convert_to_unicode"], [224, 2, 1, "", "load_vocab"], [224, 2, 1, "", "whitespace_tokenize"]], "neural_compressor.data.transforms.transform": [[225, 1, 1, "", "AlignImageChannelTransform"], [225, 1, 1, "", "BaseTransform"], [225, 1, 1, "", "CastONNXTransform"], [225, 1, 1, "", "CastPyTorchTransform"], [225, 1, 1, "", 
"CastTFTransform"], [225, 1, 1, "", "CenterCropTFTransform"], [225, 1, 1, "", "CenterCropTransform"], [225, 1, 1, "", "CollectTransform"], [225, 1, 1, "", "ComposeTransform"], [225, 1, 1, "", "CropResizeTFTransform"], [225, 1, 1, "", "CropResizeTransform"], [225, 1, 1, "", "CropToBoundingBox"], [225, 1, 1, "", "InputFeatures"], [225, 1, 1, "", "MXNetCropResizeTransform"], [225, 1, 1, "", "MXNetCropToBoundingBox"], [225, 1, 1, "", "MXNetNormalizeTransform"], [225, 1, 1, "", "MXNetTransforms"], [225, 1, 1, "", "MXNetTranspose"], [225, 1, 1, "", "NormalizeTFTransform"], [225, 1, 1, "", "NormalizeTransform"], [225, 1, 1, "", "ONNXRTCropToBoundingBox"], [225, 1, 1, "", "ONNXRTITTransforms"], [225, 1, 1, "", "ONNXRTQLTransforms"], [225, 1, 1, "", "PaddedCenterCropTransform"], [225, 1, 1, "", "ParseDecodeVocTransform"], [225, 1, 1, "", "PyTorchAlignImageChannel"], [225, 1, 1, "", "PyTorchCropResizeTransform"], [225, 1, 1, "", "PyTorchNormalizeTransform"], [225, 1, 1, "", "PyTorchTransforms"], [225, 1, 1, "", "PyTorchTranspose"], [225, 1, 1, "", "PytorchMxnetTransform"], [225, 1, 1, "", "PytorchMxnetWrapFunction"], [225, 1, 1, "", "RandomCropTFTransform"], [225, 1, 1, "", "RandomCropTransform"], [225, 1, 1, "", "RandomHorizontalFlip"], [225, 1, 1, "", "RandomResizedCropMXNetTransform"], [225, 1, 1, "", "RandomResizedCropPytorchTransform"], [225, 1, 1, "", "RandomResizedCropTFTransform"], [225, 1, 1, "", "RandomResizedCropTransform"], [225, 1, 1, "", "RandomVerticalFlip"], [225, 1, 1, "", "RescaleKerasPretrainTransform"], [225, 1, 1, "", "RescaleTFTransform"], [225, 1, 1, "", "RescaleTransform"], [225, 1, 1, "", "ResizeMXNetTransform"], [225, 1, 1, "", "ResizePytorchTransform"], [225, 1, 1, "", "ResizeTFTransform"], [225, 1, 1, "", "ResizeTransform"], [225, 1, 1, "", "ResizeWithRatio"], [225, 1, 1, "", "SquadExample"], [225, 1, 1, "", "TFModelZooCollectTransform"], [225, 1, 1, "", "TFSquadV1ModelZooPostTransform"], [225, 1, 1, "", "TFSquadV1PostTransform"], [225, 1, 1, "", "TRANSFORMS"], [225, 1, 1, "", "TensorflowCropToBoundingBox"], [225, 1, 1, "", "TensorflowRandomHorizontalFlip"], [225, 1, 1, "", "TensorflowRandomVerticalFlip"], [225, 1, 1, "", "TensorflowResizeWithRatio"], [225, 1, 1, "", "TensorflowTransform"], [225, 1, 1, "", "TensorflowTransforms"], [225, 1, 1, "", "TensorflowTranspose"], [225, 1, 1, "", "TensorflowWrapFunction"], [225, 1, 1, "", "ToArray"], [225, 1, 1, "", "ToNDArrayTransform"], [225, 1, 1, "", "Transforms"], [225, 1, 1, "", "Transpose"], [225, 2, 1, "", "convert_examples_to_features"], [225, 2, 1, "", "get_final_text"], [225, 2, 1, "", "get_torchvision_map"], [225, 2, 1, "", "read_squad_examples"], [225, 2, 1, "", "transform_registry"]], "neural_compressor.metric": [[227, 0, 0, "-", "bleu"], [228, 0, 0, "-", "bleu_util"], [229, 0, 0, "-", "coco_label_map"], [230, 0, 0, "-", "coco_tools"], [231, 0, 0, "-", "evaluate_squad"], [232, 0, 0, "-", "f1"], [234, 0, 0, "-", "metric"]], "neural_compressor.metric.bleu": [[227, 1, 1, "", "BLEU"], [227, 1, 1, "", "UnicodeRegex"], [227, 2, 1, "", "bleu_tokenize"]], "neural_compressor.metric.bleu.BLEU": [[227, 4, 1, "", "labels"], [227, 4, 1, "", "predictions"]], "neural_compressor.metric.bleu.UnicodeRegex": [[227, 4, 1, "", "nondigit_punct_re"], [227, 4, 1, "", "punct_nondigit_re"], [227, 4, 1, "", "symbol_re"]], "neural_compressor.metric.bleu_util": [[228, 2, 1, "", "compute_bleu"]], "neural_compressor.metric.coco_tools": [[230, 1, 1, "", "COCOEvalWrapper"], [230, 1, 1, "", "COCOWrapper"], [230, 2, 1, "", 
"ExportSingleImageDetectionBoxesToCoco"], [230, 2, 1, "", "ExportSingleImageDetectionMasksToCoco"], [230, 2, 1, "", "ExportSingleImageGroundtruthToCoco"]], "neural_compressor.metric.coco_tools.COCOWrapper": [[230, 4, 1, "", "dataset"], [230, 4, 1, "", "detection_type"]], "neural_compressor.metric.evaluate_squad": [[231, 2, 1, "", "evaluate"], [231, 2, 1, "", "exact_match_score"], [231, 2, 1, "", "f1_score"], [231, 2, 1, "", "metric_max_over_ground_truths"]], "neural_compressor.metric.f1": [[232, 2, 1, "", "evaluate"], [232, 2, 1, "", "f1_score"], [232, 2, 1, "", "metric_max_over_ground_truths"], [232, 2, 1, "", "normalize_answer"]], "neural_compressor.metric.metric": [[234, 1, 1, "", "Accuracy"], [234, 1, 1, "", "BaseMetric"], [234, 1, 1, "", "COCOmAPv2"], [234, 1, 1, "", "F1"], [234, 1, 1, "", "GeneralTopK"], [234, 1, 1, "", "Loss"], [234, 1, 1, "", "MAE"], [234, 1, 1, "", "METRICS"], [234, 1, 1, "", "MSE"], [234, 1, 1, "", "MXNetMetrics"], [234, 1, 1, "", "Metric"], [234, 1, 1, "", "ONNXRTGLUE"], [234, 1, 1, "", "ONNXRTITMetrics"], [234, 1, 1, "", "ONNXRTQLMetrics"], [234, 1, 1, "", "PyTorchLoss"], [234, 1, 1, "", "PyTorchMetrics"], [234, 1, 1, "", "RMSE"], [234, 1, 1, "", "ROC"], [234, 1, 1, "", "SquadF1"], [234, 1, 1, "", "TensorflowCOCOMAP"], [234, 1, 1, "", "TensorflowMAP"], [234, 1, 1, "", "TensorflowMetrics"], [234, 1, 1, "", "TensorflowTopK"], [234, 1, 1, "", "TensorflowVOCMAP"], [234, 1, 1, "", "WrapMXNetMetric"], [234, 1, 1, "", "WrapONNXRTMetric"], [234, 1, 1, "", "WrapPyTorchMetric"], [234, 1, 1, "", "mIOU"], [234, 2, 1, "", "metric_registry"], [234, 2, 1, "", "register_customer_metric"]], "neural_compressor.metric.metric.Accuracy": [[234, 4, 1, "", "label_list"], [234, 4, 1, "", "pred_list"], [234, 4, 1, "", "sample"]], "neural_compressor.metric.metric.GeneralTopK": [[234, 4, 1, "", "k"], [234, 4, 1, "", "num_correct"], [234, 4, 1, "", "num_sample"]], "neural_compressor.metric.metric.Loss": [[234, 4, 1, "", "sample"], [234, 4, 1, "", "sum"]], "neural_compressor.metric.metric.MAE": [[234, 4, 1, "", "compare_label"], [234, 4, 1, "", "label_list"], [234, 4, 1, "", "pred_list"]], "neural_compressor.metric.metric.METRICS": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.MSE": [[234, 4, 1, "", "compare_label"], [234, 4, 1, "", "label_list"], [234, 4, 1, "", "pred_list"]], "neural_compressor.metric.metric.MXNetMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.ONNXRTITMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.ONNXRTQLMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.PyTorchMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.RMSE": [[234, 4, 1, "", "mse"]], "neural_compressor.metric.metric.TensorflowMetrics": [[234, 4, 1, "", "metrics"]], "neural_compressor.metric.metric.TensorflowTopK": [[234, 4, 1, "", "k"], [234, 4, 1, "", "num_correct"], [234, 4, 1, "", "num_sample"]], "neural_compressor.mix_precision": [[235, 2, 1, "", "fit"]], "neural_compressor.model": [[236, 0, 0, "-", "base_model"], [238, 0, 0, "-", "keras_model"], [239, 0, 0, "-", "model"], [240, 0, 0, "-", "mxnet_model"], [241, 0, 0, "-", "nets_factory"], [242, 0, 0, "-", "onnx_model"], [243, 0, 0, "-", "tensorflow_model"], [244, 0, 0, "-", "torch_model"]], "neural_compressor.model.base_model": [[236, 1, 1, "", "BaseModel"]], "neural_compressor.model.keras_model": [[238, 1, 1, "", "KerasModel"]], "neural_compressor.model.model": [[239, 1, 1, "", "Model"], [239, 2, 1, "", "get_model_fwk_name"]], 
"neural_compressor.model.mxnet_model": [[240, 1, 1, "", "MXNetModel"]], "neural_compressor.model.nets_factory": [[241, 1, 1, "", "TFSlimNetsFactory"]], "neural_compressor.model.onnx_model": [[242, 1, 1, "", "ONNXModel"]], "neural_compressor.model.tensorflow_model": [[243, 1, 1, "", "TensorflowBaseModel"], [243, 1, 1, "", "TensorflowCheckpointModel"], [243, 1, 1, "", "TensorflowLLMModel"], [243, 1, 1, "", "TensorflowModel"], [243, 1, 1, "", "TensorflowQATModel"], [243, 1, 1, "", "TensorflowSavedModelModel"], [243, 2, 1, "", "checkpoint_session"], [243, 2, 1, "", "estimator_session"], [243, 2, 1, "", "frozen_pb_session"], [243, 2, 1, "", "get_model_type"], [243, 2, 1, "", "graph_def_session"], [243, 2, 1, "", "graph_session"], [243, 2, 1, "", "keras_session"], [243, 2, 1, "", "load_saved_model"], [243, 2, 1, "", "saved_model_session"], [243, 2, 1, "", "slim_session"], [243, 2, 1, "", "try_loading_keras"], [243, 2, 1, "", "validate_and_inference_input_output"], [243, 2, 1, "", "validate_graph_node"]], "neural_compressor.model.torch_model": [[244, 1, 1, "", "IPEXModel"], [244, 1, 1, "", "PyTorchBaseModel"], [244, 1, 1, "", "PyTorchFXModel"], [244, 1, 1, "", "PyTorchModel"]], "neural_compressor.objective": [[245, 1, 1, "", "Accuracy"], [245, 1, 1, "", "Footprint"], [245, 1, 1, "", "ModelSize"], [245, 1, 1, "", "MultiObjective"], [245, 1, 1, "", "Objective"], [245, 1, 1, "", "Performance"], [245, 2, 1, "", "objective_custom_registry"], [245, 2, 1, "", "objective_registry"]], "neural_compressor.profiling.parser": [[247, 0, 0, "-", "factory"], [250, 0, 0, "-", "parser"], [251, 0, 0, "-", "result"]], "neural_compressor.profiling.parser.factory": [[247, 1, 1, "", "ParserFactory"]], "neural_compressor.profiling.parser.onnx_parser": [[248, 0, 0, "-", "factory"], [249, 0, 0, "-", "parser"]], "neural_compressor.profiling.parser.onnx_parser.factory": [[248, 1, 1, "", "OnnxrtParserFactory"]], "neural_compressor.profiling.parser.onnx_parser.parser": [[249, 1, 1, "", "OnnxProfilingParser"]], "neural_compressor.profiling.parser.parser": [[250, 1, 1, "", "ProfilingParser"]], "neural_compressor.profiling.parser.result": [[251, 1, 1, "", "ProfilingResult"]], "neural_compressor.profiling.parser.tensorflow_parser": [[252, 0, 0, "-", "factory"], [253, 0, 0, "-", "parser"]], "neural_compressor.profiling.parser.tensorflow_parser.factory": [[252, 1, 1, "", "TensorFlowParserFactory"]], "neural_compressor.profiling.parser.tensorflow_parser.parser": [[253, 1, 1, "", "TensorFlowProfilingParser"]], "neural_compressor.profiling.profiler": [[254, 0, 0, "-", "factory"], [258, 0, 0, "-", "profiler"]], "neural_compressor.profiling.profiler.factory": [[254, 1, 1, "", "ProfilerFactory"]], "neural_compressor.profiling.profiler.onnxrt_profiler": [[255, 0, 0, "-", "factory"], [256, 0, 0, "-", "profiler"], [257, 0, 0, "-", "utils"]], "neural_compressor.profiling.profiler.onnxrt_profiler.factory": [[255, 1, 1, "", "ProfilerFactory"]], "neural_compressor.profiling.profiler.onnxrt_profiler.profiler": [[256, 1, 1, "", "Profiler"]], "neural_compressor.profiling.profiler.onnxrt_profiler.utils": [[257, 2, 1, "", "create_onnx_config"]], "neural_compressor.profiling.profiler.profiler": [[258, 1, 1, "", "Profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler": [[259, 0, 0, "-", "factory"], [260, 0, 0, "-", "profiler"], [261, 0, 0, "-", "utils"]], "neural_compressor.profiling.profiler.tensorflow_profiler.factory": [[259, 1, 1, "", "ProfilerFactory"]], "neural_compressor.profiling.profiler.tensorflow_profiler.profiler": [[260, 
1, 1, "", "Profiler"]], "neural_compressor.profiling.profiler.tensorflow_profiler.utils": [[261, 2, 1, "", "create_tf_config"], [261, 2, 1, "", "delete_assign"], [261, 2, 1, "", "set_eager_execution"]], "neural_compressor.quantization": [[262, 2, 1, "", "fit"]], "neural_compressor.strategy": [[263, 0, 0, "-", "auto"], [264, 0, 0, "-", "auto_mixed_precision"], [265, 0, 0, "-", "basic"], [266, 0, 0, "-", "bayesian"], [267, 0, 0, "-", "conservative"], [268, 0, 0, "-", "exhaustive"], [269, 0, 0, "-", "hawq_v2"], [271, 0, 0, "-", "mse"], [272, 0, 0, "-", "mse_v2"], [273, 0, 0, "-", "random"], [274, 0, 0, "-", "strategy"], [276, 0, 0, "-", "utils"]], "neural_compressor.strategy.auto": [[263, 1, 1, "", "AutoTuneStrategy"]], "neural_compressor.strategy.auto_mixed_precision": [[264, 1, 1, "", "AutoMixedPrecisionTuneStrategy"]], "neural_compressor.strategy.basic": [[265, 1, 1, "", "BasicTuneStrategy"]], "neural_compressor.strategy.bayesian": [[266, 1, 1, "", "BayesianOptimization"], [266, 1, 1, "", "BayesianTuneStrategy"], [266, 1, 1, "", "TargetSpace"], [266, 2, 1, "", "acq_max"]], "neural_compressor.strategy.conservative": [[267, 1, 1, "", "ConservativeTuneStrategy"]], "neural_compressor.strategy.exhaustive": [[268, 1, 1, "", "ExhaustiveTuneStrategy"]], "neural_compressor.strategy.hawq_v2": [[269, 1, 1, "", "HAWQ_V2TuneStrategy"]], "neural_compressor.strategy.mse": [[271, 1, 1, "", "MSETuneStrategy"]], "neural_compressor.strategy.mse_v2": [[272, 1, 1, "", "MSE_V2TuneStrategy"]], "neural_compressor.strategy.random": [[273, 1, 1, "", "RandomTuneStrategy"]], "neural_compressor.strategy.strategy": [[274, 1, 1, "", "TuneStrategy"], [274, 1, 1, "", "TuneStrategyMeta"], [274, 2, 1, "", "strategy_registry"]], "neural_compressor.strategy.utils": [[275, 0, 0, "-", "constant"], [277, 0, 0, "-", "tuning_sampler"], [278, 0, 0, "-", "tuning_space"], [279, 0, 0, "-", "tuning_structs"], [280, 0, 0, "-", "utility"]], "neural_compressor.strategy.utils.tuning_sampler": [[277, 1, 1, "", "BlockFallbackTuningSampler"], [277, 1, 1, "", "FallbackTuningSampler"], [277, 1, 1, "", "LowerBitsSampler"], [277, 1, 1, "", "ModelWiseTuningSampler"], [277, 1, 1, "", "OpTypeWiseTuningSampler"], [277, 1, 1, "", "OpWiseTuningSampler"], [277, 1, 1, "", "SmoothQuantSampler"], [277, 1, 1, "", "TuningOrder"], [277, 1, 1, "", "TuningSampler"], [277, 1, 1, "", "WeightOnlyQuantSampler"]], "neural_compressor.strategy.utils.tuning_space": [[278, 1, 1, "", "TuningItem"], [278, 1, 1, "", "TuningSpace"], [278, 2, 1, "", "initial_tuning_cfg_with_quant_mode"], [278, 2, 1, "", "pattern_to_internal"], [278, 2, 1, "", "pattern_to_path"], [278, 2, 1, "", "quant_mode_from_pattern"]], "neural_compressor.strategy.utils.tuning_structs": [[279, 1, 1, "", "OpTuningConfig"]], "neural_compressor.strategy.utils.utility": [[280, 1, 1, "", "ClassRegister"], [280, 1, 1, "", "OrderedDefaultDict"], [280, 1, 1, "", "QuantOptions"], [280, 1, 1, "", "QuantType"], [280, 2, 1, "", "build_slave_faker_model"], [280, 2, 1, "", "extract_data_type"], [280, 2, 1, "", "get_adaptor_name"], [280, 2, 1, "", "preprocess_user_cfg"], [280, 2, 1, "", "reverted_data_type"]], "neural_compressor.template": [[281, 0, 0, "-", "api_doc_example"]], "neural_compressor.template.api_doc_example": [[281, 1, 1, "", "ExampleClass"], [281, 4, 1, "", "attribute1"], [281, 2, 1, "", "function1"], [281, 2, 1, "", "function2"], [281, 2, 1, "", "function3"], [281, 2, 1, "", "generator1"], [281, 5, 1, "", "module_debug_level1"]], "neural_compressor.template.api_doc_example.ExampleClass": [[281, 4, 1, "", 
"attr1"], [281, 4, 1, "", "attr2"], [281, 4, 1, "", "attr5"]], "neural_compressor.tensorflow": [[282, 0, 0, "-", "algorithms"], [291, 0, 0, "-", "keras"], [304, 0, 0, "-", "quantization"], [388, 0, 0, "-", "utils"]], "neural_compressor.tensorflow.algorithms": [[285, 0, 0, "-", "smoother"], [287, 0, 0, "-", "static_quant"]], "neural_compressor.tensorflow.algorithms.smoother": [[283, 0, 0, "-", "calibration"], [284, 0, 0, "-", "core"], [286, 0, 0, "-", "scaler"]], "neural_compressor.tensorflow.algorithms.smoother.calibration": [[283, 1, 1, "", "SmoothQuantCalibration"], [283, 1, 1, "", "SmoothQuantCalibrationLLM"]], "neural_compressor.tensorflow.algorithms.smoother.core": [[284, 1, 1, "", "SmoothQuant"]], "neural_compressor.tensorflow.algorithms.smoother.scaler": [[286, 1, 1, "", "SmoothQuantScaler"], [286, 1, 1, "", "SmoothQuantScalerLLM"]], "neural_compressor.tensorflow.algorithms.static_quant": [[288, 0, 0, "-", "keras"], [289, 0, 0, "-", "tensorflow"]], "neural_compressor.tensorflow.algorithms.static_quant.keras": [[288, 1, 1, "", "KerasAdaptor"], [288, 1, 1, "", "KerasConfigConverter"], [288, 1, 1, "", "KerasQuery"], [288, 1, 1, "", "KerasSurgery"]], "neural_compressor.tensorflow.algorithms.static_quant.tensorflow": [[289, 1, 1, "", "TensorFlowAdaptor"], [289, 1, 1, "", "TensorFlowConfig"], [289, 1, 1, "", "TensorflowConfigConverter"], [289, 1, 1, "", "TensorflowQuery"], [289, 1, 1, "", "Tensorflow_ITEXAdaptor"]], "neural_compressor.tensorflow.keras": [[295, 0, 0, "-", "layers"], [300, 0, 0, "-", "quantization"]], "neural_compressor.tensorflow.keras.layers": [[292, 0, 0, "-", "conv2d"], [293, 0, 0, "-", "dense"], [294, 0, 0, "-", "depthwise_conv2d"], [296, 0, 0, "-", "layer_initializer"], [297, 0, 0, "-", "pool2d"], [298, 0, 0, "-", "separable_conv2d"]], "neural_compressor.tensorflow.keras.layers.conv2d": [[292, 1, 1, "", "QConv2D"], [292, 2, 1, "", "initialize_int8_conv2d"]], "neural_compressor.tensorflow.keras.layers.dense": [[293, 1, 1, "", "QDense"], [293, 2, 1, "", "initialize_int8_dense"]], "neural_compressor.tensorflow.keras.layers.depthwise_conv2d": [[294, 1, 1, "", "QDepthwiseConv2D"], [294, 2, 1, "", "initialize_int8_depthwise_conv2d"]], "neural_compressor.tensorflow.keras.layers.pool2d": [[297, 1, 1, "", "QAvgPool2D"], [297, 1, 1, "", "QMaxPool2D"], [297, 2, 1, "", "initialize_int8_avgpool"], [297, 2, 1, "", "initialize_int8_maxpool"]], "neural_compressor.tensorflow.keras.layers.separable_conv2d": [[298, 1, 1, "", "QSeparableConv2D"], [298, 2, 1, "", "initialize_int8_separable_conv2d"]], "neural_compressor.tensorflow.keras.quantization": [[299, 0, 0, "-", "config"]], "neural_compressor.tensorflow.keras.quantization.config": [[299, 1, 1, "", "OperatorConfig"], [299, 1, 1, "", "StaticQuantConfig"], [299, 2, 1, "", "get_all_registered_configs"], [299, 2, 1, "", "get_default_static_quant_config"]], "neural_compressor.tensorflow.quantization": [[301, 0, 0, "-", "algorithm_entry"], [302, 0, 0, "-", "autotune"], [303, 0, 0, "-", "config"], [305, 0, 0, "-", "quantize"], [361, 0, 0, "-", "utils"]], "neural_compressor.tensorflow.quantization.algorithm_entry": [[301, 2, 1, "", "smooth_quant_entry"], [301, 2, 1, "", "static_quant_entry"]], "neural_compressor.tensorflow.quantization.autotune": [[302, 2, 1, "", "autotune"], [302, 2, 1, "", "get_all_config_set"]], "neural_compressor.tensorflow.quantization.config": [[303, 1, 1, "", "SmoothQuantConfig"], [303, 1, 1, "", "StaticQuantConfig"], [303, 2, 1, "", "get_default_sq_config"], [303, 2, 1, "", "get_default_static_quant_config"]], 
"neural_compressor.tensorflow.quantization.quantize": [[305, 2, 1, "", "need_apply"], [305, 2, 1, "", "quantize_model"], [305, 2, 1, "", "quantize_model_with_single_config"]], "neural_compressor.tensorflow.quantization.utils": [[306, 0, 0, "-", "graph_converter"], [344, 0, 0, "-", "graph_rewriter"], [360, 0, 0, "-", "graph_util"], [362, 0, 0, "-", "quantize_graph"], [379, 0, 0, "-", "quantize_graph_common"], [382, 0, 0, "-", "transform_graph"], [385, 0, 0, "-", "utility"]], "neural_compressor.tensorflow.quantization.utils.graph_converter": [[306, 1, 1, "", "GraphConverter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter": [[309, 0, 0, "-", "bf16"], [333, 0, 0, "-", "generic"], [343, 0, 0, "-", "graph_base"], [351, 0, 0, "-", "int8"], [356, 0, 0, "-", "qdq"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16": [[307, 0, 0, "-", "bf16_convert"], [308, 0, 0, "-", "dequantize_cast_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert": [[307, 1, 1, "", "BF16Convert"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer": [[308, 1, 1, "", "DequantizeCastOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic": [[310, 0, 0, "-", "convert_add_to_biasadd"], [311, 0, 0, "-", "convert_layout"], [312, 0, 0, "-", "convert_leakyrelu"], [313, 0, 0, "-", "convert_nan_to_random"], [314, 0, 0, "-", "convert_placeholder_to_const"], [315, 0, 0, "-", "dilated_contraction"], [316, 0, 0, "-", "dummy_biasadd"], [317, 0, 0, "-", "expanddims_optimizer"], [318, 0, 0, "-", "fetch_weight_from_reshape"], [319, 0, 0, "-", "fold_batch_norm"], [320, 0, 0, "-", "fold_constant"], [321, 0, 0, "-", "fuse_biasadd_add"], [322, 0, 0, "-", "fuse_column_wise_mul"], [323, 0, 0, "-", "fuse_conv_with_math"], [324, 0, 0, "-", "fuse_decomposed_bn"], [325, 0, 0, "-", "fuse_decomposed_in"], [326, 0, 0, "-", "fuse_gelu"], [327, 0, 0, "-", "fuse_layer_norm"], [328, 0, 0, "-", "fuse_pad_with_conv"], [329, 0, 0, "-", "fuse_pad_with_fp32_conv"], [330, 0, 0, "-", "fuse_reshape_transpose"], [331, 0, 0, "-", "graph_cse_optimizer"], [332, 0, 0, "-", "grappler_pass"], [334, 0, 0, "-", "insert_print_node"], [335, 0, 0, "-", "move_squeeze_after_relu"], [336, 0, 0, "-", "pre_optimize"], [337, 0, 0, "-", "remove_training_nodes"], [338, 0, 0, "-", "rename_batch_norm"], [339, 0, 0, "-", "split_shared_input"], [340, 0, 0, "-", "strip_equivalent_nodes"], [341, 0, 0, "-", "strip_unused_nodes"], [342, 0, 0, "-", "switch_optimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd": [[310, 1, 1, "", "ConvertAddToBiasAddOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout": [[311, 1, 1, "", "ConvertLayoutOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu": [[312, 1, 1, "", "ConvertLeakyReluOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random": [[313, 1, 1, "", "ConvertNanToRandom"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const": [[314, 1, 1, "", "ConvertPlaceholderToConst"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction": [[315, 1, 1, "", "DilatedContraction"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd": [[316, 1, 1, "", "InjectDummyBiasAddOptimizer"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer": [[317, 1, 1, "", "ExpandDimsOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape": [[318, 1, 1, "", "FetchWeightFromReshapeOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm": [[319, 1, 1, "", "FoldBatchNormNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant": [[320, 1, 1, "", "GraphFoldConstantOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add": [[321, 1, 1, "", "FuseBiasAddAndAddOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul": [[322, 1, 1, "", "FuseColumnWiseMulOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math": [[323, 1, 1, "", "FuseConvWithMathOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn": [[324, 1, 1, "", "FuseDecomposedBNOptimizer"], [324, 2, 1, "", "bypass_reshape"], [324, 2, 1, "", "get_const_dim_count"], [324, 2, 1, "", "node_from_map"], [324, 2, 1, "", "node_name_from_input"], [324, 2, 1, "", "valid_reshape_inputs"], [324, 2, 1, "", "values_from_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in": [[325, 1, 1, "", "FuseDecomposedINOptimizer"], [325, 2, 1, "", "bypass_reshape"], [325, 2, 1, "", "get_const_dim_count"], [325, 2, 1, "", "node_from_map"], [325, 2, 1, "", "node_name_from_input"], [325, 2, 1, "", "valid_reshape_inputs"], [325, 2, 1, "", "values_from_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu": [[326, 1, 1, "", "FuseGeluOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm": [[327, 1, 1, "", "FuseLayerNormOptimizer"], [327, 2, 1, "", "node_from_map"], [327, 2, 1, "", "node_name_from_input"], [327, 2, 1, "", "values_from_const"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv": [[328, 1, 1, "", "FusePadWithConv2DOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv": [[329, 1, 1, "", "FusePadWithFP32Conv2DOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose": [[330, 1, 1, "", "FuseTransposeReshapeOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer": [[331, 1, 1, "", "GraphCseOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass": [[332, 1, 1, "", "GrapplerOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node": [[334, 1, 1, "", "InsertPrintMinMaxNode"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu": [[335, 1, 1, "", "MoveSqueezeAfterReluOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize": [[336, 1, 1, "", "PreOptimization"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes": [[337, 1, 1, "", "RemoveTrainingNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm": [[338, 1, 1, "", "RenameBatchNormOptimizer"]], 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input": [[339, 1, 1, "", "SplitSharedInputOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes": [[340, 1, 1, "", "StripEquivalentNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes": [[341, 1, 1, "", "StripUnusedNodesOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer": [[342, 1, 1, "", "SwitchOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base": [[343, 1, 1, "", "GraphRewriterBase"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8": [[345, 0, 0, "-", "freeze_fake_quant"], [346, 0, 0, "-", "freeze_value"], [347, 0, 0, "-", "fuse_conv_redundant_dequantize"], [348, 0, 0, "-", "fuse_conv_requantize"], [349, 0, 0, "-", "fuse_matmul_redundant_dequantize"], [350, 0, 0, "-", "fuse_matmul_requantize"], [352, 0, 0, "-", "meta_op_optimizer"], [353, 0, 0, "-", "post_hostconst_converter"], [354, 0, 0, "-", "post_quantized_op_cse"], [355, 0, 0, "-", "scale_propagation"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant": [[345, 1, 1, "", "FreezeFakeQuantOpOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value": [[346, 1, 1, "", "FreezeValueTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize": [[347, 1, 1, "", "FuseConvRedundantDequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize": [[348, 1, 1, "", "FuseConvRequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize": [[349, 1, 1, "", "FuseMatMulRedundantDequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize": [[350, 1, 1, "", "FuseMatMulRequantizeDequantizeNewAPITransformer"], [350, 1, 1, "", "FuseMatMulRequantizeDequantizeTransformer"], [350, 1, 1, "", "FuseMatMulRequantizeNewAPITransformer"], [350, 1, 1, "", "FuseMatMulRequantizeTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer": [[352, 1, 1, "", "MetaInfoChangingMemOpOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter": [[353, 1, 1, "", "PostHostConstConverter"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse": [[354, 1, 1, "", "PostCseOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation": [[355, 1, 1, "", "ScaleProPagationTransformer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq": [[357, 0, 0, "-", "insert_qdq_pattern"], [358, 0, 0, "-", "merge_duplicated_qdq"], [359, 0, 0, "-", "share_qdq_y_pattern"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern": [[357, 1, 1, "", "GenerateGraphWithQDQPattern"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq": [[358, 1, 1, "", "MergeDuplicatedQDQOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern": [[359, 1, 1, "", "ShareQDQForItexYPatternOptimizer"]], "neural_compressor.tensorflow.quantization.utils.graph_util": [[360, 1, 1, "", "GraphAnalyzer"], [360, 1, 1, "", 
"GraphRewriterHelper"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph": [[370, 0, 0, "-", "qdq"], [372, 0, 0, "-", "quantize_graph_base"], [373, 0, 0, "-", "quantize_graph_bn"], [374, 0, 0, "-", "quantize_graph_concatv2"], [375, 0, 0, "-", "quantize_graph_conv"], [376, 0, 0, "-", "quantize_graph_for_intel_cpu"], [377, 0, 0, "-", "quantize_graph_matmul"], [378, 0, 0, "-", "quantize_graph_pooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq": [[363, 0, 0, "-", "fuse_qdq_bn"], [364, 0, 0, "-", "fuse_qdq_concatv2"], [365, 0, 0, "-", "fuse_qdq_conv"], [366, 0, 0, "-", "fuse_qdq_deconv"], [367, 0, 0, "-", "fuse_qdq_in"], [368, 0, 0, "-", "fuse_qdq_matmul"], [369, 0, 0, "-", "fuse_qdq_pooling"], [371, 0, 0, "-", "optimize_qdq"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn": [[363, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2": [[364, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv": [[365, 1, 1, "", "FuseNodeStartWithConv2d"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv": [[366, 1, 1, "", "FuseNodeStartWithDeconv2d"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in": [[367, 1, 1, "", "FuseNodeStartWithFusedInstanceNorm"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul": [[368, 1, 1, "", "FuseNodeStartWithMatmul"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling": [[369, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq": [[371, 1, 1, "", "OptimizeQDQGraph"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base": [[372, 1, 1, "", "QuantizeGraphBase"], [372, 1, 1, "", "QuantizeNodeBase"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn": [[373, 1, 1, "", "FuseNodeStartWithFusedBatchNormV3"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2": [[374, 1, 1, "", "FuseNodeStartWithConcatV2"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv": [[375, 1, 1, "", "FuseNodeStartWithConv2d"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu": [[376, 1, 1, "", "QuantizeGraphForIntel"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul": [[377, 1, 1, "", "FuseNodeStartWithMatmul"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling": [[378, 1, 1, "", "FuseNodeStartWithPooling"]], "neural_compressor.tensorflow.quantization.utils.quantize_graph_common": [[379, 1, 1, "", "QuantizeGraphHelper"]], "neural_compressor.tensorflow.quantization.utils.transform_graph": [[380, 0, 0, "-", "bias_correction"], [381, 0, 0, "-", "graph_transform_base"], [383, 0, 0, "-", "insert_logging"], [384, 0, 0, "-", "rerange_quantized_concat"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction": [[380, 1, 1, "", "BiasCorrection"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base": [[381, 1, 1, "", "GraphTransformBase"]], "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging": [[383, 1, 1, "", "InsertLogging"]], 
"neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat": [[384, 1, 1, "", "RerangeQuantizedConcat"]], "neural_compressor.tensorflow.quantization.utils.utility": [[385, 2, 1, "", "apply_inlining"], [385, 2, 1, "", "collate_tf_preds"], [385, 2, 1, "", "construct_function_from_graph_def"], [385, 2, 1, "", "fix_ref_type_of_graph_def"], [385, 2, 1, "", "generate_feed_dict"], [385, 2, 1, "", "get_graph_def"], [385, 2, 1, "", "get_input_output_node_names"], [385, 2, 1, "", "get_model_input_shape"], [385, 2, 1, "", "get_tensor_by_name"], [385, 2, 1, "", "is_ckpt_format"], [385, 2, 1, "", "is_saved_model_format"], [385, 2, 1, "", "iterator_sess_run"], [385, 2, 1, "", "parse_saved_model"], [385, 2, 1, "", "read_graph"], [385, 2, 1, "", "reconstruct_saved_model"], [385, 2, 1, "", "strip_equivalent_nodes"], [385, 2, 1, "", "strip_unused_nodes"], [385, 2, 1, "", "write_graph"]], "neural_compressor.tensorflow.utils": [[386, 0, 0, "-", "constants"], [387, 0, 0, "-", "data"], [389, 0, 0, "-", "model"], [390, 0, 0, "-", "model_wrappers"], [391, 0, 0, "-", "utility"]], "neural_compressor.tensorflow.utils.data": [[387, 1, 1, "", "BaseDataLoader"], [387, 1, 1, "", "BatchSampler"], [387, 1, 1, "", "DummyDataset"], [387, 1, 1, "", "DummyDatasetV2"], [387, 1, 1, "", "IndexFetcher"], [387, 1, 1, "", "IterableFetcher"], [387, 1, 1, "", "IterableSampler"], [387, 1, 1, "", "SequentialSampler"], [387, 2, 1, "", "default_collate"]], "neural_compressor.tensorflow.utils.model": [[389, 1, 1, "", "Model"], [389, 1, 1, "", "TensorflowGlobalConfig"]], "neural_compressor.tensorflow.utils.model_wrappers": [[390, 1, 1, "", "BaseModel"], [390, 1, 1, "", "KerasModel"], [390, 1, 1, "", "TensorflowBaseModel"], [390, 1, 1, "", "TensorflowCheckpointModel"], [390, 1, 1, "", "TensorflowLLMModel"], [390, 1, 1, "", "TensorflowModel"], [390, 1, 1, "", "TensorflowSavedModelModel"], [390, 2, 1, "", "checkpoint_session"], [390, 2, 1, "", "estimator_session"], [390, 2, 1, "", "frozen_pb_session"], [390, 2, 1, "", "get_model_type"], [390, 2, 1, "", "get_tf_model_type"], [390, 2, 1, "", "graph_def_session"], [390, 2, 1, "", "graph_session"], [390, 2, 1, "", "keras_session"], [390, 2, 1, "", "load_saved_model"], [390, 2, 1, "", "saved_model_session"], [390, 2, 1, "", "slim_session"], [390, 2, 1, "", "try_loading_keras"], [390, 2, 1, "", "validate_and_inference_input_output"], [390, 2, 1, "", "validate_graph_node"]], "neural_compressor.tensorflow.utils.utility": [[391, 1, 1, "", "CaptureOutputToFile"], [391, 1, 1, "", "CpuInfo"], [391, 1, 1, "", "TFSlimNetsFactory"], [391, 2, 1, "", "combine_histogram"], [391, 2, 1, "", "deep_get"], [391, 2, 1, "", "disable_random"], [391, 2, 1, "", "dump_elapsed_time"], [391, 2, 1, "", "get_all_fp32_data"], [391, 2, 1, "", "get_tensor_histogram"], [391, 2, 1, "", "itex_installed"], [391, 2, 1, "", "register_algo"], [391, 2, 1, "", "singleton"], [391, 2, 1, "", "valid_keras_format"], [391, 2, 1, "", "version1_eq_version2"], [391, 2, 1, "", "version1_gt_version2"], [391, 2, 1, "", "version1_gte_version2"], [391, 2, 1, "", "version1_lt_version2"], [391, 2, 1, "", "version1_lte_version2"]], "neural_compressor.torch": [[394, 0, 0, "-", "algorithms"], [434, 0, 0, "-", "export"], [440, 0, 0, "-", "quantization"], [447, 0, 0, "-", "utils"]], "neural_compressor.torch.algorithms": [[392, 0, 0, "-", "base_algorithm"], [395, 0, 0, "-", "layer_wise"], [400, 0, 0, "-", "mixed_precision"], [402, 0, 0, "-", "mx_quant"], [407, 0, 0, "-", "pt2e_quant"], [410, 0, 0, "-", "smooth_quant"], [414, 
0, 0, "-", "static_quant"], [428, 0, 0, "-", "weight_only"]], "neural_compressor.torch.algorithms.base_algorithm": [[392, 1, 1, "", "Quantizer"]], "neural_compressor.torch.algorithms.fp8_quant.utils": [[393, 0, 0, "-", "logger"]], "neural_compressor.torch.algorithms.layer_wise": [[396, 0, 0, "-", "load"], [397, 0, 0, "-", "modified_pickle"], [398, 0, 0, "-", "utils"]], "neural_compressor.torch.algorithms.layer_wise.load": [[396, 2, 1, "", "load"]], "neural_compressor.torch.algorithms.layer_wise.modified_pickle": [[397, 3, 1, "", "PickleError"], [397, 3, 1, "", "PicklingError"], [397, 3, 1, "", "UnpicklingError"]], "neural_compressor.torch.algorithms.layer_wise.utils": [[398, 1, 1, "", "QDQLayer"], [398, 2, 1, "", "clean_module_weight"], [398, 2, 1, "", "dowload_hf_model"], [398, 2, 1, "", "get_children"], [398, 2, 1, "", "get_module"], [398, 2, 1, "", "get_named_children"], [398, 2, 1, "", "get_super_module_by_name"], [398, 2, 1, "", "load_empty_model"], [398, 2, 1, "", "load_layer_wise_quantized_model"], [398, 2, 1, "", "load_module"], [398, 2, 1, "", "load_tensor"], [398, 2, 1, "", "load_tensor_from_shard"], [398, 2, 1, "", "load_value"], [398, 2, 1, "", "register_weight_hooks"], [398, 2, 1, "", "update_module"]], "neural_compressor.torch.algorithms.mixed_precision": [[399, 0, 0, "-", "half_precision_convert"], [401, 0, 0, "-", "module_wrappers"]], "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert": [[399, 1, 1, "", "HalfPrecisionConverter"]], "neural_compressor.torch.algorithms.mixed_precision.module_wrappers": [[401, 1, 1, "", "HalfPrecisionModuleWrapper"]], "neural_compressor.torch.algorithms.mx_quant": [[403, 0, 0, "-", "mx"], [404, 0, 0, "-", "utils"]], "neural_compressor.torch.algorithms.mx_quant.mx": [[403, 1, 1, "", "MXLinear"], [403, 1, 1, "", "MXQuantizer"]], "neural_compressor.torch.algorithms.mx_quant.utils": [[404, 1, 1, "", "ElemFormat"], [404, 1, 1, "", "RoundingMode"], [404, 2, 1, "", "quantize_elemwise_op"], [404, 2, 1, "", "quantize_mx_op"]], "neural_compressor.torch.algorithms.pt2e_quant": [[405, 0, 0, "-", "core"], [406, 0, 0, "-", "half_precision_rewriter"], [408, 0, 0, "-", "save_load"], [409, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.pt2e_quant.core": [[405, 1, 1, "", "W8A8PT2EQuantizer"]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter": [[406, 1, 1, "", "PatternPair"], [406, 2, 1, "", "apply_single_pattern_pair"], [406, 2, 1, "", "get_filter_fn"], [406, 2, 1, "", "get_half_precision_node_set"], [406, 2, 1, "", "get_unquantized_node_set"], [406, 2, 1, "", "pattern_factory"], [406, 2, 1, "", "transformation"]], "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter.PatternPair": [[406, 4, 1, "", "fn"], [406, 4, 1, "", "replace_pattern"], [406, 4, 1, "", "search_pattern"]], "neural_compressor.torch.algorithms.pt2e_quant.save_load": [[408, 2, 1, "", "load"], [408, 2, 1, "", "save"]], "neural_compressor.torch.algorithms.pt2e_quant.utility": [[409, 2, 1, "", "create_quant_spec_from_config"], [409, 2, 1, "", "create_xiq_quantizer_from_pt2e_config"]], "neural_compressor.torch.algorithms.smooth_quant": [[411, 0, 0, "-", "save_load"], [412, 0, 0, "-", "smooth_quant"], [413, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.smooth_quant.save_load": [[411, 2, 1, "", "recover_model_from_json"]], "neural_compressor.torch.algorithms.smooth_quant.smooth_quant": [[412, 1, 1, "", "SmoothQuantQuantizer"], [412, 2, 1, "", "qdq_quantize"]], 
"neural_compressor.torch.algorithms.smooth_quant.utility": [[413, 1, 1, "", "AutoAlpha"], [413, 1, 1, "", "Calibration"], [413, 1, 1, "", "GraphTrace"], [413, 1, 1, "", "SQLinearWrapper"], [413, 1, 1, "", "TorchSmoothQuant"], [413, 1, 1, "", "WrapperLayer"], [413, 2, 1, "", "build_captured_dataloader"], [413, 2, 1, "", "cal_scale"], [413, 2, 1, "", "cfg_to_qconfig"], [413, 2, 1, "", "check_cfg_and_qconfig"], [413, 2, 1, "", "dump_model_op_stats"], [413, 2, 1, "", "enough_memo_store_scale"], [413, 2, 1, "", "forward_wrapper"], [413, 2, 1, "", "get_module"], [413, 2, 1, "", "get_parent"], [413, 2, 1, "", "get_quantizable_ops_recursively"], [413, 2, 1, "", "model_forward"], [413, 2, 1, "", "model_forward_per_sample"], [413, 2, 1, "", "move_input_to_device"], [413, 2, 1, "", "quant_dequant_w_v1"], [413, 2, 1, "", "quant_dequant_x_v1"], [413, 2, 1, "", "register_autotune"], [413, 2, 1, "", "reshape_in_channel_to_last"], [413, 2, 1, "", "reshape_scale_as_input"], [413, 2, 1, "", "reshape_scale_as_weight"], [413, 2, 1, "", "set_module"], [413, 2, 1, "", "update_sq_scale"]], "neural_compressor.torch.algorithms.static_quant": [[415, 0, 0, "-", "save_load"], [416, 0, 0, "-", "static_quant"], [417, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.static_quant.save_load": [[415, 2, 1, "", "load"], [415, 2, 1, "", "save"]], "neural_compressor.torch.algorithms.static_quant.static_quant": [[416, 1, 1, "", "StaticQuantQuantizer"]], "neural_compressor.torch.algorithms.static_quant.utility": [[417, 1, 1, "", "TransformerBasedModelBlockPatternDetector"], [417, 2, 1, "", "cfg_to_qconfig"], [417, 2, 1, "", "check_cfg_and_qconfig"], [417, 2, 1, "", "dump_model_op_stats"], [417, 2, 1, "", "generate_activation_observer"], [417, 2, 1, "", "generate_xpu_qconfig"], [417, 2, 1, "", "get_depth"], [417, 2, 1, "", "get_dict_at_depth"], [417, 2, 1, "", "get_element_under_depth"], [417, 2, 1, "", "get_quantizable_ops_from_cfgs"], [417, 2, 1, "", "get_quantizable_ops_recursively"], [417, 2, 1, "", "parse_cfgs"], [417, 2, 1, "", "simple_inference"]], "neural_compressor.torch.algorithms.weight_only": [[418, 0, 0, "-", "autoround"], [419, 0, 0, "-", "awq"], [420, 0, 0, "-", "gptq"], [424, 0, 0, "-", "hqq"], [429, 0, 0, "-", "modules"], [430, 0, 0, "-", "rtn"], [431, 0, 0, "-", "save_load"], [432, 0, 0, "-", "teq"], [433, 0, 0, "-", "utility"]], "neural_compressor.torch.algorithms.weight_only.autoround": [[418, 1, 1, "", "AutoRoundQuantizer"], [418, 2, 1, "", "get_dataloader"]], "neural_compressor.torch.algorithms.weight_only.awq": [[419, 1, 1, "", "AWQQuantizer"]], "neural_compressor.torch.algorithms.weight_only.gptq": [[420, 1, 1, "", "GPTQ"], [420, 1, 1, "", "GPTQuantizer"], [420, 1, 1, "", "Quantizer"], [420, 1, 1, "", "RAWGPTQuantizer"], [420, 2, 1, "", "find_layers"], [420, 2, 1, "", "find_layers_name"], [420, 2, 1, "", "is_leaf"], [420, 2, 1, "", "log_quantizable_layers_per_transformer"], [420, 2, 1, "", "trace_gptq_target_blocks"]], "neural_compressor.torch.algorithms.weight_only.hqq": [[421, 0, 0, "-", "bitpack"], [422, 0, 0, "-", "config"], [423, 0, 0, "-", "core"], [425, 0, 0, "-", "optimizer"], [426, 0, 0, "-", "qtensor"], [427, 0, 0, "-", "quantizer"]], "neural_compressor.torch.algorithms.weight_only.hqq.bitpack": [[421, 1, 1, "", "Packer"]], "neural_compressor.torch.algorithms.weight_only.hqq.config": [[422, 1, 1, "", "HQQModuleConfig"], [422, 1, 1, "", "QTensorConfig"]], "neural_compressor.torch.algorithms.weight_only.hqq.core": [[423, 1, 1, "", "HQQLinear"], [423, 1, 1, "", "HQQTensorHandle"]], 
"neural_compressor.torch.algorithms.weight_only.hqq.optimizer": [[425, 2, 1, "", "optimize_weights_proximal_legacy"]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor": [[426, 1, 1, "", "QTensor"], [426, 1, 1, "", "QTensorMetaInfo"]], "neural_compressor.torch.algorithms.weight_only.hqq.qtensor.QTensorMetaInfo": [[426, 4, 1, "", "axis"], [426, 4, 1, "", "group_size"], [426, 4, 1, "", "nbits"], [426, 4, 1, "", "packing"], [426, 4, 1, "", "shape"]], "neural_compressor.torch.algorithms.weight_only.hqq.quantizer": [[427, 1, 1, "", "HQQuantizer"], [427, 2, 1, "", "filter_fn"], [427, 2, 1, "", "patch_hqq_moduile"], [427, 2, 1, "", "replacement_fn"]], "neural_compressor.torch.algorithms.weight_only.modules": [[429, 1, 1, "", "FakeAffineTensorQuantFunction"], [429, 1, 1, "", "HPUWeightOnlyLinear"], [429, 1, 1, "", "INCWeightOnlyLinear"], [429, 1, 1, "", "MulLinear"], [429, 1, 1, "", "QDQLayer"], [429, 1, 1, "", "TEQLinearFakeQuant"], [429, 1, 1, "", "UnpackedWeightOnlyLinearParams"], [429, 1, 1, "", "WeightOnlyLinear"]], "neural_compressor.torch.algorithms.weight_only.rtn": [[430, 1, 1, "", "RTNQuantizer"]], "neural_compressor.torch.algorithms.weight_only.save_load": [[431, 1, 1, "", "WOQModelLoader"], [431, 2, 1, "", "load"], [431, 2, 1, "", "save"]], "neural_compressor.torch.algorithms.weight_only.teq": [[432, 1, 1, "", "TEQuantizer"], [432, 1, 1, "", "TrainableEquivalentTransformation"]], "neural_compressor.torch.algorithms.weight_only.utility": [[433, 1, 1, "", "GraphTrace"], [433, 2, 1, "", "fetch_module"], [433, 2, 1, "", "forward_wrapper"], [433, 2, 1, "", "get_absorb_layers"], [433, 2, 1, "", "get_block_prefix"], [433, 2, 1, "", "get_module"], [433, 2, 1, "", "get_module_input_output"], [433, 2, 1, "", "get_parent"], [433, 2, 1, "", "model_forward"], [433, 2, 1, "", "move_input_to_device"], [433, 2, 1, "", "qdq_weight_actor"], [433, 2, 1, "", "qdq_weight_asym"], [433, 2, 1, "", "qdq_weight_sym"], [433, 2, 1, "", "quant_tensor"], [433, 2, 1, "", "quant_weight_w_scale"], [433, 2, 1, "", "quantize_4bit"], [433, 2, 1, "", "recover_forward"], [433, 2, 1, "", "replace_forward"], [433, 2, 1, "", "search_clip"], [433, 2, 1, "", "set_module"]], "neural_compressor.torch.export": [[435, 0, 0, "-", "pt2e_export"]], "neural_compressor.torch.export.pt2e_export": [[435, 2, 1, "", "export"], [435, 2, 1, "", "export_model_for_pt2e_quant"]], "neural_compressor.torch.quantization": [[437, 0, 0, "-", "algorithm_entry"], [438, 0, 0, "-", "autotune"], [439, 0, 0, "-", "config"], [441, 0, 0, "-", "load_entry"], [442, 0, 0, "-", "quantize"]], "neural_compressor.torch.quantization.algorithm_entry": [[437, 2, 1, "", "autoround_quantize_entry"], [437, 2, 1, "", "awq_quantize_entry"], [437, 2, 1, "", "fp8_entry"], [437, 2, 1, "", "gptq_entry"], [437, 2, 1, "", "hqq_entry"], [437, 2, 1, "", "mixed_precision_entry"], [437, 2, 1, "", "mx_quant_entry"], [437, 2, 1, "", "pt2e_dynamic_quant_entry"], [437, 2, 1, "", "pt2e_static_quant_entry"], [437, 2, 1, "", "rtn_entry"], [437, 2, 1, "", "smooth_quant_entry"], [437, 2, 1, "", "static_quant_entry"], [437, 2, 1, "", "teq_quantize_entry"]], "neural_compressor.torch.quantization.autotune": [[438, 2, 1, "", "autotune"], [438, 2, 1, "", "get_all_config_set"], [438, 2, 1, "", "get_rtn_double_quant_config_set"]], "neural_compressor.torch.quantization.config": [[439, 1, 1, "", "AWQConfig"], [439, 1, 1, "", "AutoRoundConfig"], [439, 1, 1, "", "DynamicQuantConfig"], [439, 1, 1, "", "FP8Config"], [439, 1, 1, "", "GPTQConfig"], [439, 1, 1, "", "HQQConfig"], [439, 1, 1, "", 
"MXQuantConfig"], [439, 1, 1, "", "MixedPrecisionConfig"], [439, 1, 1, "", "OperatorConfig"], [439, 1, 1, "", "RTNConfig"], [439, 1, 1, "", "SmoothQuantConfig"], [439, 1, 1, "", "StaticQuantConfig"], [439, 1, 1, "", "TEQConfig"], [439, 1, 1, "", "TorchBaseConfig"], [439, 2, 1, "", "get_all_registered_configs"], [439, 2, 1, "", "get_default_AutoRound_config"], [439, 2, 1, "", "get_default_awq_config"], [439, 2, 1, "", "get_default_double_quant_config"], [439, 2, 1, "", "get_default_dynamic_config"], [439, 2, 1, "", "get_default_fp8_config"], [439, 2, 1, "", "get_default_fp8_config_set"], [439, 2, 1, "", "get_default_gptq_config"], [439, 2, 1, "", "get_default_hqq_config"], [439, 2, 1, "", "get_default_mixed_precision_config"], [439, 2, 1, "", "get_default_mixed_precision_config_set"], [439, 2, 1, "", "get_default_mx_config"], [439, 2, 1, "", "get_default_rtn_config"], [439, 2, 1, "", "get_default_sq_config"], [439, 2, 1, "", "get_default_static_config"], [439, 2, 1, "", "get_default_teq_config"], [439, 2, 1, "", "get_woq_tuning_config"]], "neural_compressor.torch.quantization.load_entry": [[441, 2, 1, "", "load"]], "neural_compressor.torch.quantization.quantize": [[442, 2, 1, "", "convert"], [442, 2, 1, "", "finalize_calibration"], [442, 2, 1, "", "need_apply"], [442, 2, 1, "", "prepare"], [442, 2, 1, "", "quantize"]], "neural_compressor.torch.utils": [[443, 0, 0, "-", "auto_accelerator"], [444, 0, 0, "-", "bit_packer"], [445, 0, 0, "-", "constants"], [446, 0, 0, "-", "environ"], [448, 0, 0, "-", "utility"]], "neural_compressor.torch.utils.auto_accelerator": [[443, 1, 1, "", "AcceleratorRegistry"], [443, 1, 1, "", "Auto_Accelerator"], [443, 1, 1, "", "CPU_Accelerator"], [443, 1, 1, "", "CUDA_Accelerator"], [443, 1, 1, "", "HPU_Accelerator"], [443, 1, 1, "", "XPU_Accelerator"], [443, 2, 1, "", "auto_detect_accelerator"], [443, 2, 1, "", "register_accelerator"]], "neural_compressor.torch.utils.bit_packer": [[444, 2, 1, "", "pack_array_with_numba_b2_c16"], [444, 2, 1, "", "pack_array_with_numba_b2_c32"], [444, 2, 1, "", "pack_array_with_numba_b2_c64"], [444, 2, 1, "", "pack_array_with_numba_b2_c8"], [444, 2, 1, "", "pack_array_with_numba_b4_c16"], [444, 2, 1, "", "pack_array_with_numba_b4_c32"], [444, 2, 1, "", "pack_array_with_numba_b4_c64"], [444, 2, 1, "", "pack_array_with_numba_b4_c8"], [444, 2, 1, "", "pack_array_with_numba_b8_c16"], [444, 2, 1, "", "pack_array_with_numba_b8_c32"], [444, 2, 1, "", "pack_array_with_numba_b8_c64"], [444, 2, 1, "", "pack_array_with_numba_b8_c8"], [444, 2, 1, "", "register_pack_func"]], "neural_compressor.torch.utils.constants": [[445, 1, 1, "", "LoadFormat"]], "neural_compressor.torch.utils.environ": [[446, 2, 1, "", "device_synchronize"], [446, 2, 1, "", "get_accelerator"], [446, 2, 1, "", "get_ipex_version"], [446, 2, 1, "", "get_torch_version"], [446, 2, 1, "", "is_hpex_available"], [446, 2, 1, "", "is_ipex_available"], [446, 2, 1, "", "is_ipex_imported"], [446, 2, 1, "", "is_package_available"], [446, 2, 1, "", "is_transformers_imported"]], "neural_compressor.torch.utils.utility": [[448, 2, 1, "", "detect_device"], [448, 2, 1, "", "dowload_hf_model"], [448, 2, 1, "", "dump_model_op_stats"], [448, 2, 1, "", "fetch_module"], [448, 2, 1, "", "get_block_names"], [448, 2, 1, "", "get_double_quant_config_dict"], [448, 2, 1, "", "get_layer_names_in_block"], [448, 2, 1, "", "get_model_device"], [448, 2, 1, "", "get_model_info"], [448, 2, 1, "", "get_module"], [448, 2, 1, "", "get_multimodal_block_names"], [448, 2, 1, "", "get_processor_type_from_user_config"], 
[448, 2, 1, "", "get_quantizer"], [448, 2, 1, "", "is_optimum_habana_available"], [448, 2, 1, "", "load_empty_model"], [448, 2, 1, "", "postprocess_model"], [448, 2, 1, "", "register_algo"], [448, 2, 1, "", "run_fn_for_vlm_autoround"], [448, 2, 1, "", "set_module"], [448, 2, 1, "", "to_device"], [448, 2, 1, "", "to_dtype"], [448, 2, 1, "", "validate_modules"]], "neural_compressor.training": [[449, 1, 1, "", "CallBacks"], [449, 1, 1, "", "CompressionManager"], [449, 2, 1, "", "fit"], [449, 2, 1, "", "prepare_compression"]], "neural_compressor.transformers": [[451, 0, 0, "-", "utils"]], "neural_compressor.transformers.quantization": [[450, 0, 0, "-", "utils"]], "neural_compressor.transformers.utils": [[452, 0, 0, "-", "quantization_config"]], "neural_compressor.transformers.utils.quantization_config": [[452, 1, 1, "", "AutoRoundConfig"], [452, 1, 1, "", "AwqConfig"], [452, 1, 1, "", "GPTQConfig"], [452, 1, 1, "", "INCQuantizationConfigMixin"], [452, 1, 1, "", "QuantizationMethod"], [452, 1, 1, "", "RtnConfig"], [452, 1, 1, "", "TeqConfig"]], "neural_compressor.utils": [[453, 0, 0, "-", "collect_layer_histogram"], [454, 0, 0, "-", "constant"], [455, 0, 0, "-", "create_obj_from_config"], [456, 0, 0, "-", "export"], [461, 0, 0, "-", "kl_divergence"], [462, 0, 0, "-", "load_huggingface"], [463, 0, 0, "-", "logger"], [464, 0, 0, "-", "options"], [465, 0, 0, "-", "pytorch"], [466, 0, 0, "-", "utility"], [467, 0, 0, "-", "weights_details"]], "neural_compressor.utils.collect_layer_histogram": [[453, 1, 1, "", "LayerHistogramCollector"]], "neural_compressor.utils.create_obj_from_config": [[455, 2, 1, "", "create_dataloader"], [455, 2, 1, "", "create_dataset"], [455, 2, 1, "", "create_eval_func"], [455, 2, 1, "", "create_train_func"], [455, 2, 1, "", "get_algorithm"], [455, 2, 1, "", "get_func_from_config"], [455, 2, 1, "", "get_metrics"], [455, 2, 1, "", "get_postprocess"], [455, 2, 1, "", "get_preprocess"]], "neural_compressor.utils.export": [[457, 0, 0, "-", "qlinear2qdq"], [458, 0, 0, "-", "tf2onnx"], [459, 0, 0, "-", "torch2onnx"]], "neural_compressor.utils.export.qlinear2qdq": [[457, 2, 1, "", "check_model"], [457, 2, 1, "", "onnx_qlinear_to_qdq"]], "neural_compressor.utils.export.tf2onnx": [[458, 2, 1, "", "tf_to_fp32_onnx"], [458, 2, 1, "", "tf_to_int8_onnx"]], "neural_compressor.utils.export.torch2onnx": [[459, 2, 1, "", "dynamic_quant_export"], [459, 2, 1, "", "get_node_mapping"], [459, 2, 1, "", "get_quantizable_onnx_ops"], [459, 2, 1, "", "static_quant_export"], [459, 2, 1, "", "torch_to_fp32_onnx"], [459, 2, 1, "", "torch_to_int8_onnx"]], "neural_compressor.utils.kl_divergence": [[461, 1, 1, "", "KL_Divergence"]], "neural_compressor.utils.load_huggingface": [[462, 1, 1, "", "OptimizedModel"], [462, 2, 1, "", "export_compressed_model"], [462, 2, 1, "", "save_for_huggingface_upstream"]], "neural_compressor.utils.logger": [[463, 1, 1, "", "Logger"], [463, 2, 1, "", "debug"], [463, 2, 1, "", "error"], [463, 2, 1, "", "fatal"], [463, 2, 1, "", "info"], [463, 2, 1, "", "log"], [463, 2, 1, "", "warn"], [463, 2, 1, "", "warning"]], "neural_compressor.utils.options": [[464, 1, 1, "", "onnxrt"]], "neural_compressor.utils.pytorch": [[465, 2, 1, "", "is_int8_model"], [465, 2, 1, "", "load"], [465, 2, 1, "", "load_weight_only"], [465, 2, 1, "", "recover_model_from_json"]], "neural_compressor.utils.utility": [[466, 1, 1, "", "CaptureOutputToFile"], [466, 1, 1, "", "CpuInfo"], [466, 2, 1, "", "Dequantize"], [466, 1, 1, "", "DotDict"], [466, 1, 1, "", "GLOBAL_STATE"], [466, 1, 1, "", "LazyImport"], [466, 
1, 1, "", "MODE"], [466, 1, 1, "", "OpEntry"], [466, 1, 1, "", "Statistics"], [466, 2, 1, "", "alias_param"], [466, 2, 1, "", "calculate_mse"], [466, 2, 1, "", "check_key_exist"], [466, 2, 1, "", "combine_histogram"], [466, 2, 1, "", "compare_objects"], [466, 2, 1, "", "compute_sparsity"], [466, 2, 1, "", "deep_get"], [466, 2, 1, "", "deep_set"], [466, 2, 1, "", "dequantize_weight"], [466, 2, 1, "", "dump_class_attrs"], [466, 2, 1, "", "dump_data_to_local"], [466, 2, 1, "", "dump_elapsed_time"], [466, 2, 1, "", "dump_table"], [466, 2, 1, "", "dump_table_to_csv"], [466, 2, 1, "", "equal_dicts"], [466, 2, 1, "", "fault_tolerant_file"], [466, 2, 1, "", "get_all_fp32_data"], [466, 2, 1, "", "get_number_of_sockets"], [466, 2, 1, "", "get_op_list"], [466, 2, 1, "", "get_size"], [466, 2, 1, "", "get_tensor_histogram"], [466, 2, 1, "", "get_tensors_info"], [466, 2, 1, "", "get_tuning_history"], [466, 2, 1, "", "get_weights_details"], [466, 2, 1, "", "load_data_from_pkl"], [466, 2, 1, "", "mse_metric_gap"], [466, 2, 1, "", "print_op_list"], [466, 2, 1, "", "print_table"], [466, 2, 1, "", "recover"], [466, 2, 1, "", "set_random_seed"], [466, 2, 1, "", "set_resume_from"], [466, 2, 1, "", "set_tensorboard"], [466, 2, 1, "", "set_workspace"], [466, 2, 1, "", "show_memory_info"], [466, 2, 1, "", "singleton"], [466, 2, 1, "", "str2array"], [466, 2, 1, "", "time_limit"], [466, 2, 1, "", "version1_eq_version2"], [466, 2, 1, "", "version1_gt_version2"], [466, 2, 1, "", "version1_gte_version2"], [466, 2, 1, "", "version1_lt_version2"], [466, 2, 1, "", "version1_lte_version2"]], "neural_compressor.utils.weights_details": [[467, 1, 1, "", "WeightsDetails"], [467, 1, 1, "", "WeightsStatistics"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "function", "Python function"], "3": ["py", "exception", "Python exception"], "4": ["py", "attribute", "Python attribute"], "5": ["py", "data", "Python data"]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:function", "3": "py:exception", "4": "py:attribute", "5": "py:data"}, "terms": {"": [30, 31, 90, 128, 133, 140, 150, 152, 173, 174, 175, 179, 180, 181, 182, 185, 187, 192, 194, 195, 208, 211, 224, 225, 232, 234, 235, 239, 262, 380, 385, 391, 396, 413, 431, 433, 441, 448, 466, 470, 471, 472, 474, 475, 476, 477, 479, 481, 483, 488, 490, 491, 496, 497, 522, 526, 528, 529, 530, 531, 533, 538, 539, 543, 544, 545, 546, 549, 550, 551, 552, 554, 555], "0": [3, 30, 31, 39, 75, 90, 133, 139, 140, 145, 147, 149, 152, 153, 154, 163, 169, 171, 192, 195, 200, 202, 204, 209, 212, 213, 216, 221, 225, 234, 245, 262, 267, 281, 303, 311, 385, 387, 396, 413, 417, 425, 433, 439, 443, 452, 459, 462, 473, 474, 475, 477, 480, 482, 483, 486, 488, 489, 492, 494, 504, 522, 523, 526, 528, 529, 530, 531, 534, 535, 536, 537, 538, 539, 541, 544, 546, 547, 549, 550, 551, 552, 553, 554], "00": [536, 555], "000": [211, 494], "0000": 540, "00000": 211, "00001": 211, "00003": 211, "00004": 211, "0001": [3, 195, 538], "0003": [536, 555], "0004": 538, "0005": 536, "0006": 536, "0007": 555, "0008": 536, "001": [211, 488, 526, 552], "0016": 536, "002": 526, "0021": 536, "0025": 536, "0029": [488, 552, 555], "00296431384049356": [488, 552], "0030": 555, "0036": [488, 552], "0040": 536, "0043": 536, "0046": [536, 555], "005": 549, "0051": 536, "0058": 555, "0059755356051027775": [488, 552], "0061": 536, "006533813662827015": [488, 552], "00774": 544, "0081": 555, "0086": [488, 552], "0097": 536, "00978": [439, 477, 488, 549], "00x": 555, "01": 
[31, 153, 195, 425, 439, 477, 536, 538, 549, 554, 555], "0106": 555, "0115": 555, "0119": 555, "0130": 555, "0142": 536, "017": 553, "01x": 555, "02": [195, 555], "0201": 536, "0213": 555, "02340": 169, "027": 555, "02x": 555, "03": [536, 555], "0352": 536, "03x": 555, "04": [494, 534, 555], "04191": 521, "04x": 555, "05": [150, 195, 234, 413, 536, 537, 552, 555], "0500": [488, 552], "05516": [439, 477, 488], "0559": 536, "05754": 544, "05x": 555, "06": 555, "0698": [488, 552], "06x": 555, "07": [488, 494, 552, 555], "0734": 555, "0737": [488, 552], "07x": 555, "08": [225, 536, 553, 555], "0806": [488, 552], "0837": 551, "09": [536, 555], "09656": 187, "099": 211, "09x": 555, "0f": 526, "0x": [140, 396], "0x2b000590": 555, "1": [3, 29, 30, 31, 40, 59, 74, 90, 94, 98, 131, 133, 140, 142, 145, 147, 149, 150, 151, 153, 154, 163, 169, 181, 182, 184, 195, 200, 202, 204, 208, 209, 210, 212, 213, 216, 217, 221, 225, 230, 231, 232, 234, 245, 262, 280, 281, 284, 292, 294, 298, 312, 331, 346, 359, 383, 385, 387, 391, 396, 413, 417, 418, 420, 425, 429, 433, 439, 441, 448, 452, 455, 462, 466, 470, 472, 473, 474, 475, 476, 477, 479, 480, 481, 482, 483, 486, 488, 490, 492, 494, 495, 497, 521, 523, 528, 530, 534, 536, 537, 539, 541, 542, 543, 544, 546, 547, 549, 550, 551, 552, 553, 554], "10": [195, 211, 266, 425, 482, 483, 494, 520, 528, 534, 538, 544, 553, 554, 555], "100": [3, 151, 152, 153, 195, 211, 281, 301, 302, 305, 443, 481, 488, 489, 520, 526, 532, 534, 538, 546, 553, 554, 555], "1000": 538, "10000": [266, 544], "10004": [474, 477, 478, 483, 489, 492, 495, 523, 525, 526, 530, 533, 539, 541, 543, 547, 549], "10005": [541, 547], "10006": 525, "10016": [488, 552], "100x": [477, 488, 549], "101": [209, 234, 488, 537, 552, 555], "102": 555, "1024": [131, 211, 383], "10271": [473, 541], "10281": [473, 541], "103": 553, "1034": 555, "10438": [477, 488, 549, 552], "1048": 555, "10537": [473, 541], "106": 555, "107": 555, "1076": 555, "10833": 225, "1091": 555, "10k": [418, 452, 527], "10x": 555, "11": [474, 488, 534, 537, 539, 552, 554, 555], "1106": 555, "1114": 555, "1115": 555, "1121": 555, "1135": 555, "1136": 555, "1137": 555, "116": 553, "1188": 555, "119": [488, 552], "12": [536, 555], "120": [488, 552, 555], "1202": 555, "1205": 555, "121": 555, "123": [211, 553, 555], "1234": 555, "1236": 555, "1237": 555, "124m": [475, 552], "125": 555, "1259": 555, "125m": [475, 531, 552, 555], "126": 555, "127": [212, 213, 387, 488, 546, 552], "128": [3, 31, 145, 209, 212, 213, 225, 387, 418, 420, 439, 452, 477, 482, 488, 538, 546, 549, 553], "1285": 555, "12b": 536, "13": [488, 529, 534, 552, 555], "130": 555, "1307": 555, "132": 555, "13325": [477, 488, 549, 552], "1344": 555, "135": 555, "1365": [225, 553, 555], "1381": [488, 552], "1388": 555, "139": [488, 552], "13b": [475, 536, 552], "13x": 555, "14": [195, 458, 459, 528, 534, 555], "14314": [477, 488, 549], "144": 555, "1445": 555, "146": 555, "147": 555, "148": 555, "1495": 555, "15": [133, 385, 530, 534, 536], "150": 555, "1506": 555, "151": 555, "1510": [488, 552], "152": 555, "153": 555, "1535": 555, "1547": 555, "156": 555, "1564": 555, "1574": 555, "1583": [488, 552], "15x": 555, "16": [444, 474, 534, 549], "1601": [488, 552], "161": 555, "162": [488, 552, 555], "164": 555, "1644": 555, "16599": [488, 552], "169": 555, "16x": 555, "16x32gb": 555, "17": [486, 494, 534, 536, 555], "1707": 555, "1717": 555, "172": [488, 552, 555], "1732": 555, "17323": [420, 439, 477, 488, 549], "1742": [488, 552], "1749": [488, 552], "175": 555, "17509": [488, 
552], "1751": [488, 552], "176": 555, "177": [232, 555], "178": 555, "17x": 555, "18": [486, 534, 536, 545], "1809": 521, "1810": 169, "1818": 555, "182": 555, "1842": 555, "18518": 135, "18529": 135, "187": 555, "1873": 555, "1879": 555, "1890": [488, 552], "1891": 555, "18x": 555, "19": 555, "192": [488, 552], "192795": 522, "193": 555, "195": 555, "1978": 195, "1983354538679123": [488, 552], "1988": 555, "199": 555, "1998": 555, "1_11_capabl": 497, "1b7": [475, 536, 552], "1e": [150, 413, 488, 552], "1e1": 425, "1s4c14ins1bsthroughput": 555, "1x": 555, "1x1": [538, 544], "1x2": [488, 552], "1xchannel": [195, 544], "2": [29, 30, 31, 39, 59, 90, 94, 140, 147, 149, 151, 153, 154, 175, 181, 182, 195, 210, 230, 232, 234, 245, 266, 281, 297, 311, 331, 359, 396, 413, 433, 441, 444, 448, 466, 471, 472, 473, 475, 476, 477, 479, 480, 481, 482, 483, 484, 488, 489, 492, 494, 495, 504, 521, 523, 530, 531, 533, 534, 535, 536, 537, 539, 541, 542, 544, 545, 546, 547, 549, 550, 551, 552, 553, 554], "20": [225, 425, 544, 553, 555], "200": [224, 418, 439, 452, 477, 492, 554], "2000": 538, "2001": 209, "2011": 521, "2012": 211, "2017": 521, "2018": 521, "2019": 544, "2020": [135, 473, 541], "2021": 544, "2022": [195, 477, 488, 535, 544, 549, 552], "2023": [473, 477, 488, 494, 541, 544, 549, 552], "2024": [494, 555], "203": 555, "2043": 209, "2048": [3, 391, 418, 420, 439, 452, 466, 477, 549], "205": 555, "2059": 555, "206": 555, "207": [488, 552], "2070": 555, "2079": 555, "20b": 536, "20x": 555, "21": [234, 488, 536, 545, 552, 555], "210": 555, "21020": [488, 552], "211": 555, "2111": 544, "213": 555, "2132": 551, "214": 555, "2170": 555, "2172": 555, "218": 555, "219": 555, "21x": 555, "22": [534, 536, 555], "2202": 555, "2204": 187, "2205301336": 555, "2209": [477, 488, 549, 552], "2210": [420, 439, 477, 488, 549], "2211": [477, 488, 549, 552], "2220": [488, 552], "224": [195, 221, 526, 528, 538, 553], "22444": [488, 552], "225": [221, 538], "2286": 555, "229": [221, 538], "22x": 555, "23": [154, 536, 555], "230": 555, "2301": 544, "2305": [477, 488, 549], "2306": [439, 477, 488, 549], "2309": [439, 477, 488], "2310": [473, 541], "2326": 555, "23f1": 555, "23ubuntu4": 555, "23x": 555, "24": [154, 483, 544, 554, 555], "24101": 544, "24116": 544, "242": 555, "2420": [488, 552], "2428": 555, "247": 555, "24x": 555, "25": [544, 555], "250": 544, "255": [488, 546, 552], "256": [216, 221, 439, 526, 538, 553], "2567": 555, "2570": [488, 552], "2578": 555, "25x": 555, "26": [536, 555], "26f1": 555, "26x": 555, "27": [529, 536, 555], "279": 555, "27x": 555, "28": [209, 210, 214, 555], "282": 555, "284": 555, "2847": 555, "28x": 555, "29": [536, 555], "294": 555, "2949": 555, "295": 555, "2970": [488, 552], "2991": [488, 552], "29x": 555, "2d": [55, 327, 488, 552], "2e5m2": 472, "2gb": [243, 390], "2x": [527, 533], "2x1": [544, 555], "2x2": [488, 552], "2xlarg": 555, "3": [29, 31, 133, 140, 153, 154, 182, 195, 211, 225, 230, 281, 385, 396, 413, 441, 466, 473, 474, 476, 477, 479, 481, 482, 483, 488, 489, 492, 494, 504, 521, 522, 523, 526, 528, 530, 531, 534, 537, 538, 539, 541, 542, 544, 545, 546, 549, 550, 551, 552, 553, 554], "30": [225, 527, 533, 553, 555], "300": 544, "305": 555, "3087": 555, "30b": [475, 536, 552], "30x": 555, "31": [30, 536, 555], "311": 555, "313": 555, "31x": 555, "32": [31, 171, 280, 413, 418, 429, 433, 439, 444, 452, 473, 474, 481, 482, 526, 532, 541, 549, 555], "322": 555, "3253": [488, 552], "3254": 555, "32accuraci": 555, "32x": 555, "33": [135, 473, 536, 541, 555], "334": 555, "33x": 
555, "34": [494, 536, 555], "3424": 555, "346": 555, "348": 555, "34f1": 555, "35": [536, 544, 555], "350": 555, "350m": [475, 552], "354": [475, 552], "3542": [475, 552], "35x": 555, "36": 555, "360": 555, "36x": 555, "37": [536, 555], "3707": 555, "3725": 555, "3740": [488, 552], "3757": [475, 552], "379": [475, 552], "37x": 555, "38": 555, "3804": [475, 552], "381": [544, 555], "3815": [488, 552], "384": [225, 553, 555], "3845": [488, 552], "3850": [488, 552], "385297635664756e": [488, 552], "3852e": [488, 552], "386": 555, "387": 555, "3887": [475, 552], "38x": 555, "39": 555, "3911": [488, 552], "3924": [488, 552], "393": 555, "3930": [475, 552], "394": 555, "3947": [475, 552], "395": 555, "396": 555, "397": 555, "399": 555, "39x": 555, "3b": [475, 536, 552], "3d": [55, 327, 488, 545, 552], "3dgan": 545, "3f": 483, "3rd": [474, 488, 539, 545, 546, 548], "3x": 529, "4": [31, 39, 142, 145, 151, 154, 175, 184, 195, 218, 221, 225, 227, 228, 230, 280, 281, 311, 418, 429, 433, 439, 444, 452, 471, 473, 475, 476, 477, 478, 483, 488, 489, 490, 494, 495, 497, 520, 526, 527, 530, 533, 534, 537, 538, 541, 544, 546, 549, 552, 553, 554, 555], "40": [536, 555], "401": 555, "402": 555, "404": [544, 555], "405": 555, "4055": [488, 552], "406": [538, 555], "407": 555, "40b": 536, "41": 555, "411": 555, "4149": [475, 552], "4172": [475, 552], "4199": 555, "41x": 555, "42": [195, 418, 439, 477, 555], "420": 533, "42x": 555, "43": [536, 555], "431": 555, "434": 555, "43x": 555, "44": 555, "442": 555, "4469": 551, "44x": 555, "45": [492, 536, 555], "4516": [475, 552], "4533": [475, 552], "456": 538, "457": 555, "45x": 555, "46": [536, 555], "461": 555, "4634": [475, 552], "46x": 555, "47": [154, 483, 536, 555], "4734": [488, 552], "4741": [488, 552], "4743": [488, 552], "47x": 555, "48": [154, 488, 552, 555], "4800": 555, "4828": [475, 552], "483": 555, "484": 281, "485": [538, 555], "48x": 555, "49": [536, 555], "4906": [475, 552], "492": 555, "4936": [475, 552], "494": 555, "498": 555, "4980": [475, 552], "499": 555, "4f": 522, "4k": 489, "4th": [474, 488, 536, 545, 546], "4x": [184, 488, 546], "4x1": [175, 195, 538, 544, 555], "5": [139, 149, 153, 163, 195, 213, 221, 234, 262, 303, 413, 417, 439, 475, 477, 480, 488, 495, 528, 530, 534, 537, 538, 544, 546, 549, 552, 553, 554, 555], "50": [232, 488, 496, 544, 552, 555], "5018": [475, 552], "5040": [488, 552], "5048": [475, 552], "505": 555, "5057": [475, 552], "50x": 555, "51": [536, 555], "512": [448, 474, 477], "512gb": 555, "513": 555, "518": [475, 552], "5185": [475, 552], "52": 555, "520": 555, "526": 555, "529": 555, "52f1": 555, "52x": 555, "53": [536, 555], "530": 555, "5382": 555, "539": 555, "53x": 555, "54": 555, "541": 555, "5421": 555, "5436": [475, 552], "5443": [475, 552], "5444": [488, 552], "5494": 555, "54accuraci": 555, "54x": 555, "55": [536, 555], "5519": 555, "5523": 555, "5530": 555, "5540": 555, "5552": [475, 552], "5555": [488, 552], "556": 555, "558": 555, "5593": [475, 552], "55x": 555, "56": 555, "560m": [475, 552], "565": 555, "56be4db0acb8001400a502ec": 232, "56x": 555, "57": [488, 536, 552, 555], "5742": [475, 552], "576": 555, "5764": [475, 552], "5767": 555, "578": 555, "5789": [475, 552], "57x": 555, "58": [536, 555], "582": 555, "5826": [488, 552], "584": 555, "58x": 555, "59": [488, 536, 552, 555], "5972": [488, 552], "5977": [475, 552], "59f1": 555, "59x": 555, "5b": [475, 552], "5gb": 431, "5x": 545, "6": [195, 473, 475, 477, 480, 488, 530, 541, 546, 552, 554, 555], "60": 555, "600": 538, "602": 555, "6038": [488, 
552], "6057": 555, "60x": 555, "61": [536, 555], "6113": 555, "6187": 555, "61accuraci": 555, "62": [536, 555], "6247": [475, 552], "626": 555, "6297": [475, 552], "62x": 555, "63": [536, 544, 555], "633": 555, "6354": 555, "6365": [475, 552], "637690492221736e": [488, 552], "6376e": [488, 552], "6392": [475, 552], "64": [225, 439, 444, 474, 482, 488, 536, 549, 552, 553, 555], "6404": [475, 552], "6426": 555, "6437": [475, 552], "6455": 555, "6481": [488, 552], "6499": [475, 552], "64x": 555, "65": 555, "6506": [488, 552], "6534": 555, "6542": [475, 552], "65421": 522, "655": [475, 552], "6569": [475, 552], "65b": [475, 552], "66": 555, "6621": [475, 552], "66b": [475, 552], "66x": 555, "67": [536, 555], "6718": [475, 552], "6735": [475, 552], "6739": 555, "6740": [475, 552], "6769": [475, 552], "67x": 555, "68": [536, 553, 555], "680": 555, "6804": [475, 552], "6814": [475, 552], "6821": [475, 488, 552], "6831": [475, 552], "6835": [488, 552], "6836": [488, 552], "6837": [488, 552], "6839": [488, 552], "684": 555, "6845": 555, "6848": [488, 552], "6866": [475, 552], "6872": [475, 552], "6883": [488, 552], "6895": [475, 552], "68x": 555, "69": 555, "6953": [475, 552], "6994": 552, "69x": 555, "6ap0": 555, "6b": [475, 536, 552], "6f": 526, "7": [29, 151, 154, 195, 245, 413, 425, 433, 475, 477, 480, 488, 497, 520, 529, 530, 538, 544, 546, 550, 552, 554, 555], "70": [536, 555], "702": 555, "7022": 555, "7025": 555, "7034": 555, "704": 555, "705": 555, "7058": 552, "707": 555, "708": 555, "70b": 536, "70x": 555, "71": [154, 536, 555], "711": 555, "7128": [475, 552], "714": 555, "7143": [475, 552], "7149": [475, 552], "715": 555, "7153": 555, "717": 555, "7174": [488, 552], "718": 555, "719": 555, "72": [154, 488, 536, 552, 555], "7221": [475, 552], "72x": 555, "73": [536, 555], "7323": 555, "7326": [475, 552], "7332": 552, "7335": 552, "7357": [475, 552], "7361": [475, 552], "7392": 552, "7398": [475, 552], "7399": 555, "73x": 555, "74": [536, 555], "7415": 555, "7440": [488, 552], "7442": 555, "7451": [488, 552], "749": 555, "7495": 551, "74x": 555, "75": [536, 544, 555], "754": 474, "755": 555, "7589": [488, 552], "7590": [475, 552], "75x": 555, "76": [536, 555], "7608": [488, 552], "7615": 552, "7627": [475, 552], "7632": 552, "7677": 552, "76x": 555, "77": [536, 555], "774m": [475, 552], "7759": [475, 552], "7772": [488, 552], "779": 555, "77x": 555, "78": [553, 555], "7840": [475, 552], "7895": 544, "79": [536, 555], "7908": [475, 552], "7957": [475, 552], "7965": 555, "798": 555, "799": 555, "79x": 555, "7b": [475, 484, 489, 494, 536, 552], "7b1": [475, 552], "8": [98, 140, 154, 195, 224, 396, 404, 413, 418, 433, 439, 444, 452, 471, 472, 473, 475, 477, 478, 488, 489, 492, 496, 521, 522, 530, 534, 541, 544, 545, 549, 552, 554, 555], "80": [529, 536, 547, 555], "800": [225, 553], "8001": [1, 453], "801": 544, "8018": 555, "8025": 555, "8044": 555, "805": 549, "8074": 555, "8084": 555, "80x": 555, "81": 555, "816": 555, "8178": 555, "81x": 555, "82": [536, 555], "8207": [488, 552], "8213": 555, "8235": 555, "8246": [488, 552], "8256": 555, "8259": 555, "8266": 551, "8291": 551, "8294": 551, "8298": [488, 552], "8299": 551, "83": [488, 551, 552, 555], "8314": 555, "8363": 555, "837": 555, "8371": 555, "8372": 551, "8382": 555, "83x": 555, "84": [488, 552, 555], "840": 555, "841": 555, "8411": 555, "844": 555, "8480": 555, "84x": 555, "85": [488, 551, 552, 555], "853": 555, "858": 555, "85x": 555, "86": [536, 555], "8626": 555, "8684": 555, "86x": 555, "87": [221, 536, 555], "875": [221, 
553], "8763": [488, 552], "8768": [488, 552], "8782": 555, "87f1": 555, "88": [529, 551, 555], "8814": 555, "89": [488, 536, 552, 555], "893": 555, "8993": 555, "89x": 555, "8b": 489, "8ghz": 555, "8x1": 195, "9": [169, 195, 475, 483, 489, 492, 534, 538, 544, 552, 554, 555], "90": [195, 536, 547, 555], "901": 555, "9048": 555, "9091": 555, "90f1": 555, "90x": 555, "91": [488, 549, 552, 555], "914": 555, "92": [536, 555], "927": 555, "92x": 555, "93": [488, 552, 555], "9301": [488, 552], "9308": [488, 552], "9391": 555, "94": [553, 555], "9403": 555, "947": 555, "948": 555, "94x": 555, "95": [75, 154, 234, 537, 555], "9521": 555, "9522": 555, "9527": [266, 538], "95top1": 555, "96": [536, 555], "96x": 555, "97": [536, 538, 555], "98": [195, 538, 544, 555], "9860": [488, 552], "9867": 536, "98x": 555, "99": [3, 153, 303, 555], "9907": 536, "9911": 536, "9915": 536, "9928": 536, "9930": 536, "9933": 536, "9945": 536, "9955": 536, "9957": 536, "9972": 536, "9975": 536, "9976": 536, "9984": 536, "9986": 536, "9987": 536, "9988": 536, "9989": 536, "999": [3, 303], "9990": 536, "9991": 536, "9992": 536, "9994": 536, "9995": 536, "9997": 536, "99ccff": 554, "99x": 555, "A": [3, 40, 59, 60, 88, 101, 125, 126, 133, 138, 145, 152, 153, 159, 161, 162, 169, 170, 173, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 195, 211, 225, 227, 230, 231, 232, 234, 235, 239, 243, 266, 283, 286, 312, 331, 332, 385, 389, 390, 391, 397, 404, 406, 409, 412, 413, 415, 425, 427, 433, 443, 448, 449, 466, 475, 477, 478, 488, 491, 496, 522, 523, 527, 530, 536, 537, 538, 540, 544, 545, 546, 549, 552, 553, 554], "And": [55, 327, 481, 488, 491, 492, 520, 522, 538, 552], "As": [472, 477, 488, 492, 496, 523, 537, 538, 544, 549, 554], "At": [473, 491, 496, 541, 554], "Being": 490, "By": [140, 173, 195, 227, 396, 477, 484, 496, 497, 537, 540, 544, 545, 549, 554], "For": [29, 31, 145, 149, 153, 156, 160, 173, 174, 177, 178, 188, 195, 211, 225, 231, 232, 234, 235, 262, 413, 431, 433, 439, 466, 472, 475, 477, 478, 479, 480, 484, 488, 490, 492, 493, 495, 497, 525, 526, 528, 530, 531, 533, 544, 548, 549, 552, 554, 555], "IT": [211, 218, 545], "If": [52, 53, 55, 59, 140, 151, 153, 156, 195, 198, 199, 211, 225, 235, 262, 281, 324, 325, 327, 331, 396, 413, 431, 433, 435, 441, 448, 449, 452, 472, 478, 481, 488, 489, 491, 492, 496, 520, 522, 523, 526, 529, 534, 535, 537, 538, 542, 544, 546, 549, 551, 552, 553, 554], "In": [162, 177, 178, 179, 184, 185, 186, 189, 195, 208, 230, 245, 448, 470, 477, 478, 481, 488, 489, 490, 492, 494, 495, 496, 523, 525, 526, 537, 538, 542, 543, 544, 546, 548, 549, 551, 552, 553, 554], "It": [39, 140, 159, 162, 166, 175, 198, 199, 232, 234, 235, 262, 267, 271, 311, 396, 433, 442, 449, 473, 476, 477, 478, 479, 481, 482, 488, 489, 495, 496, 497, 521, 529, 531, 538, 541, 544, 546, 549, 551, 554], "Its": [496, 521, 544], "NOT": [209, 530], "No": [413, 491, 522, 529, 538, 545], "Not": [277, 278, 391, 466, 472, 522], "ON": 548, "Of": 523, "On": [488, 546, 555], "One": [478, 497, 522, 526, 533, 544, 545, 554], "TO": 526, "The": [3, 21, 39, 59, 98, 104, 106, 125, 133, 140, 145, 146, 151, 152, 153, 155, 156, 158, 160, 161, 162, 163, 165, 169, 173, 175, 177, 178, 180, 181, 182, 187, 188, 189, 190, 191, 192, 195, 198, 199, 200, 209, 210, 211, 212, 213, 217, 218, 225, 227, 228, 229, 231, 232, 234, 235, 243, 245, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 278, 280, 281, 283, 284, 288, 292, 293, 294, 297, 298, 299, 301, 302, 305, 311, 331, 344, 361, 385, 386, 387, 
388, 389, 390, 391, 392, 396, 405, 406, 407, 408, 409, 410, 412, 413, 414, 416, 417, 418, 422, 423, 425, 426, 427, 431, 433, 435, 437, 438, 439, 441, 442, 443, 448, 449, 453, 455, 461, 462, 465, 466, 467, 470, 471, 472, 473, 474, 476, 477, 478, 480, 481, 482, 488, 489, 490, 492, 495, 496, 497, 498, 500, 506, 509, 512, 520, 521, 522, 523, 526, 528, 530, 531, 532, 533, 534, 536, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555], "Their": 455, "Then": [230, 488, 496, 552, 554], "There": [100, 263, 265, 471, 476, 480, 488, 494, 522, 530, 533, 538, 549, 552, 554], "These": [496, 533], "To": [30, 230, 245, 392, 473, 474, 475, 476, 477, 480, 482, 483, 488, 491, 494, 496, 497, 522, 526, 539, 541, 544, 548, 549, 550, 552, 554], "Will": [128, 380, 441, 533], "With": [81, 245, 352, 481, 488, 492, 495, 497, 523, 534, 538, 542, 544, 545, 546, 551, 552, 554, 555], "_": [198, 199, 262, 474, 475, 477, 478, 481, 483, 488, 489, 492, 494, 495, 496, 521, 526, 528, 530, 534, 538, 539, 540, 542, 544, 546, 549, 550, 551, 552, 554], "__all__": 522, "__call__": 218, "__class__": 245, "__getitem__": [207, 211, 225, 523, 538], "__init__": [431, 441, 495, 522, 523, 537, 554], "__iter__": [207, 211, 523, 538], "__len__": [207, 211], "__name__": 245, "__str__": 452, "__version__": [1, 138, 397, 522], "_configset": 153, "_description_": [438, 478], "_epoch_ran": 162, "_generate_dataload": [200, 387], "_inductor": [471, 476], "_lossandgradi": 522, "_propagate_qconfig_help": 145, "_quantiz": 459, "_quantizedconv": [76, 347], "_quantizeddeconv": [76, 347], "_quantizedfusedbatchnorm": [108, 363], "_quantizedmatmul": [78, 79, 349, 350], "_saved_model": [133, 385], "_type_": [145, 462], "_use_new_zipfile_seri": 170, "a100cuda": 555, "aarch64": 520, "ab": [30, 169, 187, 420, 433, 439, 481, 488, 544, 546, 552], "abbrevi": 195, "abc": [98, 232, 495, 554], "abcadaptor": 495, "abctunestrategi": 554, "abil": [473, 495, 496, 530, 541, 552], "abl": [198, 199, 235, 262, 449, 481, 488, 546], "abound": 545, "about": [169, 191, 466, 470, 472, 490, 522, 523, 538, 544, 548, 554, 555], "abov": [39, 311, 475, 476, 477, 479, 488, 495, 496, 497, 544, 546, 552, 554], "absolut": [169, 195, 234, 413, 473, 488, 537, 538, 541, 544, 546, 554], "absorb": [145, 149, 413, 433, 477, 549], "absorb_layer_dict": [419, 439, 452], "absorb_pair": 31, "absorb_to_lay": [145, 413, 432, 433, 439], "absorbed_1": [145, 433], "absorpt": [145, 433], "abstract": [71, 162, 225, 343, 495, 497, 532], "abus": 490, "ac": 266, "acc": [474, 479, 536, 544, 555], "acceler": [171, 439, 443, 446, 472, 474, 475, 477, 478, 480, 488, 489, 494, 534, 538, 544, 545, 548, 549, 552, 554], "accelerator_execution_tim": 251, "acceleratorregistri": 443, "accept": [195, 281, 482, 489, 490, 520, 522, 523, 549], "access": [52, 53, 55, 195, 324, 325, 327, 462, 466], "accommod": [496, 497], "accompani": [535, 554], "accord": [1, 107, 145, 151, 195, 209, 210, 211, 225, 269, 271, 392, 442, 448, 455, 472, 481, 482, 488, 495, 526, 544, 548, 552, 553, 554], "accordingli": 552, "account": [490, 551, 554], "accumul": [277, 473, 477, 541, 554], "accur": [29, 149, 413, 420, 439, 476, 477, 488, 544, 549, 552], "accuraci": [31, 151, 153, 192, 195, 198, 199, 234, 235, 245, 262, 267, 281, 449, 473, 475, 476, 477, 481, 482, 484, 492, 494, 496, 497, 521, 523, 525, 526, 527, 528, 531, 537, 538, 541, 542, 543, 544, 545, 549, 550, 551, 552, 555], "accuracy_criterion": [195, 245, 538, 542, 554], "accuracy_level": 31, "accuracycriterion": [195, 538, 554], "accuracyspars": 
555, "achiev": [475, 476, 477, 478, 479, 482, 484, 488, 489, 528, 536, 538, 543, 544, 545, 546, 552, 554], "acq": 266, "acq_max": 266, "acquisit": 266, "across": [235, 392, 473, 482, 524, 531, 538, 541, 544, 547, 552, 554], "act": [192, 207, 387, 490], "act_algo": [413, 439, 476], "act_algorithm": 303, "act_bit": [418, 439], "act_dtyp": [299, 303, 439, 473, 475, 476, 479, 541], "act_dynam": [418, 439], "act_fn": 174, "act_granular": [299, 303, 439, 479], "act_group_s": [418, 439], "act_max_valu": [292, 293, 294, 297, 298], "act_maxabs_hw_weights_pcs_maxabs_pow2": 472, "act_maxabs_pow2_weights_pcs_opt_pow2": 472, "act_min_valu": [292, 293, 294, 297, 298], "act_ord": [439, 477], "act_sym": [299, 303, 418, 439, 476, 479, 481, 482], "action": 490, "activ": [16, 31, 126, 145, 166, 195, 278, 280, 286, 292, 293, 294, 298, 413, 417, 439, 471, 472, 475, 476, 477, 480, 481, 488, 495, 496, 497, 521, 525, 530, 538, 546, 549, 552], "activation_max": 466, "activation_min": 466, "activation_min_max": 466, "activationoper": 5, "activity_regular": [292, 293, 294, 298], "actord": [31, 477, 549], "actual": [234, 476, 526, 529], "ad": [186, 209, 227, 266, 471, 474, 477, 496, 497, 539, 544, 549], "adadelta": 526, "adam": 165, "adamw": 165, "adapt": [473, 490, 496, 541, 545], "adaptor": [162, 195, 280, 288, 289, 455, 470, 497, 533, 547, 548, 550, 552], "adaptor_registri": 495, "add": [30, 38, 49, 94, 104, 133, 165, 192, 195, 209, 211, 278, 310, 321, 359, 385, 466, 470, 475, 491, 492, 494, 497, 523, 526, 528, 530, 537, 538, 551, 552, 554], "add_origin_loss": [163, 195], "add_port_to_nam": 90, "add_qdq_pair_to_weight": [28, 195, 546], "add_relu": 528, "addit": [145, 195, 431, 449, 477, 496, 497, 549, 550, 554], "addition": [477, 497, 526, 544, 554], "addn": 530, "address": [211, 474, 477, 482, 490, 522, 539, 545], "addv2": [38, 54, 310, 326, 530], "adher": 491, "adjust": [488, 546, 552], "adopt": [488, 491, 544, 545, 552], "advanc": [135, 195, 473, 474, 477, 479, 481, 488, 490, 496, 501, 536, 541, 544, 546, 549], "advantag": [474, 544, 554], "afc": 232, "affect": [488, 552], "affin": [142, 429, 546], "aforement": 538, "after": [1, 63, 133, 145, 171, 175, 180, 182, 187, 195, 209, 221, 225, 267, 281, 335, 385, 406, 413, 446, 462, 466, 471, 472, 476, 477, 480, 488, 494, 496, 525, 528, 533, 537, 538, 542, 543, 544, 546, 548, 549, 551, 552, 553, 554], "ag": 490, "again": [211, 548], "against": [228, 406], "aggress": 554, "agnost": [157, 478], "agnostic_mod": 230, "agre": 491, "ai": [470, 473, 474, 478, 494, 541, 545, 548], "aid": 524, "aim": [470, 482, 494, 531, 536, 544, 552, 554], "ajanthan": 544, "al": [135, 473, 477, 488, 494, 521, 534, 541, 549, 552], "albert": 555, "alemb": 529, "alexnet": 555, "algo": [156, 288, 289, 409, 442, 455], "algo_nam": [152, 305, 442, 522], "algorithm": [1, 31, 145, 152, 156, 195, 226, 227, 234, 290, 299, 301, 305, 391, 436, 437, 439, 441, 442, 448, 455, 461, 477, 478, 481, 482, 484, 488, 494, 495, 496, 497, 522, 525, 530, 531, 533, 536, 544, 546], "algorithm_entri": [304, 440, 522], "algorithm_registri": 146, "algorithm_typ": 146, "algorithmschedul": 146, "algos_map": [391, 448], "alia": [463, 466], "alias": [466, 522], "alias_param": 466, "alibaba": [494, 545], "align": [225, 472, 490, 549, 553], "alignimagechannel": 553, "alignimagechanneltransform": 225, "alistarh": 544, "all": [1, 3, 21, 31, 48, 82, 140, 141, 145, 146, 148, 151, 152, 154, 157, 162, 165, 170, 175, 183, 192, 195, 200, 201, 207, 211, 218, 225, 227, 230, 232, 234, 236, 245, 274, 280, 282, 299, 302, 320, 353, 387, 389, 
390, 391, 392, 396, 398, 413, 417, 420, 429, 438, 439, 441, 448, 455, 459, 466, 472, 475, 478, 481, 482, 484, 488, 489, 490, 491, 494, 495, 496, 497, 522, 523, 526, 530, 534, 538, 543, 544, 545, 546, 548, 549, 552, 553, 554], "all_par": [413, 433], "allbalanc": 555, "allenai": [231, 232], "allevi": 552, "alloc": 483, "allow": [145, 152, 266, 433, 474, 476, 477, 482, 526, 537, 538, 539, 544, 549, 552, 554], "allowlist": [439, 472], "along": [425, 426, 431, 477, 534, 544, 549], "alpha": [126, 139, 142, 149, 169, 189, 286, 303, 413, 417, 429, 439, 488, 522, 546, 554], "alpha_list": 277, "alpha_max": [413, 439, 552], "alpha_min": [413, 439, 552], "alpha_step": [413, 439, 552], "alreadi": [140, 151, 211, 224, 396, 474, 478, 495, 529, 533, 554], "also": [138, 174, 208, 211, 225, 227, 234, 245, 262, 397, 472, 474, 477, 478, 479, 480, 488, 489, 491, 495, 496, 497, 523, 527, 531, 533, 537, 538, 542, 544, 545, 546, 548, 549, 552, 554], "altern": [140, 195, 262, 396], "although": [208, 551], "alwai": [153, 230, 234, 262, 472, 482, 537, 538, 549], "amax": [433, 473, 541], "amazon": 494, "amd": [494, 534, 555], "among": [234, 473, 488, 541, 552], "amount": [3, 413, 480, 552], "amp": [1, 418, 549], "amp_cfg": 1, "amp_convert": 1, "amx": [474, 545], "an": [1, 52, 53, 55, 90, 126, 128, 133, 135, 138, 140, 145, 150, 166, 180, 195, 203, 207, 211, 225, 226, 227, 231, 232, 235, 257, 261, 262, 281, 286, 324, 325, 327, 380, 385, 396, 397, 409, 417, 433, 448, 449, 466, 468, 472, 473, 474, 475, 476, 477, 478, 480, 481, 482, 488, 490, 492, 494, 495, 497, 521, 523, 524, 528, 536, 537, 538, 539, 541, 542, 544, 545, 546, 549, 551, 552, 553, 554], "anaconda": 534, "analysi": [473, 541, 544, 551], "analyt": [470, 534, 545], "analyz": [95, 173, 243, 360, 390, 551, 552], "andrew": 521, "ani": [135, 140, 156, 230, 232, 257, 261, 277, 281, 302, 392, 396, 422, 435, 442, 443, 452, 466, 478, 481, 490, 494, 497, 522, 526, 544, 551, 554], "anneal": 168, "anno_dir": 210, "anno_path": [234, 537], "annot": [210, 230, 281, 496, 497, 537, 544, 554], "anoth": [225, 227, 466, 553, 554], "answer": [225, 231, 232, 490, 544, 553, 555], "answer_start": 232, "answeringsquad": 555, "ao": 409, "ap": 537, "ap0": 555, "apach": [3, 535], "api": [55, 60, 95, 154, 230, 234, 262, 277, 278, 281, 290, 302, 303, 304, 305, 327, 332, 360, 389, 391, 392, 420, 436, 438, 439, 440, 442, 445, 466, 472, 475, 477, 482, 488, 492, 494, 498, 500, 506, 509, 512, 528, 529, 531, 533, 534, 538, 550, 551, 554, 556], "appear": [140, 396, 490], "append": [145, 266, 492, 525, 538, 543], "append_attr": 145, "appl": 554, "appli": [31, 98, 111, 112, 113, 116, 118, 122, 133, 142, 162, 184, 186, 228, 301, 305, 366, 367, 368, 371, 373, 377, 385, 392, 406, 413, 429, 437, 442, 448, 470, 473, 476, 480, 481, 482, 488, 490, 496, 497, 538, 541, 543, 544, 546, 548, 551, 552, 554], "applianc": 545, "applic": [221, 431, 477, 479, 488, 497, 528, 545, 549, 552, 553], "apply_awq_clip": 31, "apply_awq_scal": 31, "apply_inlin": [133, 385], "apply_single_pattern_pair": 406, "appoint": 490, "approach": [195, 476, 477, 488, 492, 521, 533, 538, 544, 545, 547, 549, 554], "appropri": [443, 476, 482, 484, 488, 489, 490, 521, 552], "approv": 491, "approx": [477, 488, 549], "approxim": [227, 228, 477, 496, 537, 549], "appu": 477, "apr": [494, 545], "apt": [529, 534], "ar": [52, 53, 90, 140, 145, 154, 175, 180, 181, 182, 187, 195, 209, 225, 230, 234, 263, 265, 266, 280, 281, 324, 325, 392, 396, 406, 409, 413, 417, 448, 455, 465, 466, 471, 472, 473, 474, 476, 477, 478, 480, 481, 484, 488, 489, 490, 
491, 492, 495, 496, 522, 523, 525, 526, 527, 528, 530, 532, 533, 535, 536, 537, 538, 539, 541, 542, 543, 544, 545, 546, 548, 549, 550, 551, 552, 553, 554], "arang": 552, "arbitrari": [140, 396, 478, 538, 543], "arc": 489, "arcfac": 555, "architectur": [151, 470, 474, 477, 484, 488, 489, 494, 520, 531, 538, 544, 545, 549], "arctic": 534, "are_shapes_equ": 90, "area": [225, 230, 473, 537, 541, 553], "arg": [1, 39, 90, 145, 154, 209, 210, 211, 266, 281, 311, 399, 420, 433, 437, 448, 462, 463, 473, 478, 526, 538, 541, 544, 546, 549, 552, 553], "argmax": 16, "argmaxoper": 6, "argpars": 154, "argu": 522, "argument": [140, 154, 195, 281, 396, 406, 413, 431, 438, 441, 442, 478, 481, 482, 549], "ariel": 544, "arithmet": 530, "arm": [494, 533, 534, 555], "around": 90, "arr": [30, 391, 466], "arrai": [3, 30, 31, 133, 140, 225, 230, 396, 444, 466, 553], "arrang": [181, 182, 187, 210, 211, 214, 544], "art": 544, "articl": [231, 232], "arxiv": [169, 187, 420, 439, 473, 477, 488, 494, 521, 541, 544, 545, 549, 552], "as_text": 90, "ascii": [140, 396], "asd932_": 211, "ask": [494, 534], "aspect": [221, 225, 553], "asplo": 545, "assert_error": 90, "assertionerror": [170, 176, 183, 192, 235, 435, 448], "assign": [230, 466, 526, 554], "assist": [159, 550], "associ": [133, 140, 230, 396, 406], "assum": [224, 230, 495, 530], "asterisk": [475, 552], "asym": [29, 30, 31, 142, 145, 413, 429, 433, 497, 530, 549], "asymmetr": [413, 481, 497, 546, 549, 554], "atenc": 483, "atom": 535, "att": [231, 232], "attach": [488, 538, 552], "attack": 490, "attempt": 554, "attent": [16, 143, 171, 173, 184, 195, 209, 417, 490, 544], "attention_ffn_nam": 184, "attention_mask": [209, 538], "attentionoper": 7, "attr": [90, 133, 173, 385], "attr1": 281, "attr2": 281, "attr5": 281, "attribut": [30, 68, 89, 90, 133, 145, 173, 184, 195, 340, 448, 466, 472, 495, 523, 530, 532, 538], "attribute1": 281, "attribute_to_kwarg": 30, "attributeerror": [138, 281, 397], "aug": [494, 545], "augment": 2, "author": 535, "auto": [145, 152, 153, 171, 173, 195, 264, 270, 302, 413, 438, 439, 443, 446, 448, 466, 472, 475, 478, 485, 492, 494, 521, 531, 544, 546, 549], "auto_acceler": 447, "auto_alpha_arg": [303, 439, 552], "auto_clip": 452, "auto_config": 544, "auto_copi": 145, "auto_detect_acceler": 443, "auto_input_output": [133, 385], "auto_merg": 30, "auto_mixed_precis": 270, "auto_scal": 452, "auto_slim": 172, "autoalpha": 413, "autom": [545, 552], "automat": [171, 173, 174, 195, 211, 214, 221, 443, 448, 472, 478, 481, 483, 484, 492, 494, 520, 533, 538, 539, 543, 544, 546, 552, 553, 554], "automixedprecisiontunestrategi": 264, "automodelforcausallm": [141, 398, 489, 531], "automodelforsequenceclassif": 538, "autonumb": [496, 497], "autoround": [428, 437, 439, 478, 488, 489, 494, 536], "autoround_arg": 477, "autoround_quantize_entri": 437, "autoroundconfig": [437, 439, 452, 477, 489], "autoroundquant": 418, "autotoken": [489, 538], "autotrack": [125, 133, 283, 385], "autotun": [479, 480, 481, 488, 502, 522], "autotunestrategi": 263, "aux": 1, "auxiliari": 460, "avail": [154, 188, 195, 239, 413, 446, 448, 474, 477, 478, 484, 494, 498, 500, 506, 509, 512, 522, 527, 529, 531, 533, 544, 554, 555], "averag": [31, 231, 232, 234, 477, 537, 538, 549, 554], "averagepool": 23, "averagepooling2d": 297, "avg": 546, "avgpool": [114, 123, 297, 369, 378, 530], "avoid": [90, 140, 145, 209, 396, 413, 433, 448, 483, 492, 522], "avx": 474, "avx512": [474, 488, 539, 546], "avx512_bf16": [474, 539], "avx512_core_amx_fp16": 474, "avx512_fp16": 474, "aw": [545, 555], 
"awai": 523, "awar": [31, 135, 162, 195, 269, 439, 449, 477, 478, 482, 495, 496, 497, 521, 525, 526, 528, 533, 543, 545, 549, 554], "awq": [31, 428, 433, 437, 439, 478, 488, 489, 547, 549], "awq_arg": [477, 549], "awq_g32asym": 549, "awq_quant": 31, "awq_quantize_entri": 437, "awqconfig": [437, 439, 452, 477, 489], "awqquant": 419, "ax": [195, 404, 459], "axi": [30, 195, 425, 426], "azur": [491, 494, 545], "b": [30, 59, 154, 209, 331, 418, 466, 477, 488, 491, 521, 537, 544, 549, 552, 554], "b1": [59, 331], "b16": [36, 308], "b3": 555, "b_dataload": [151, 195, 520, 538], "b_filter": 1, "b_func": [151, 520], "back": [140, 145, 154, 225, 396, 448], "backbon": 551, "backend": [2, 28, 29, 149, 165, 195, 196, 197, 201, 202, 205, 209, 210, 212, 213, 214, 215, 216, 220, 222, 225, 235, 236, 237, 239, 272, 439, 452, 474, 477, 478, 492, 497, 526, 532, 533, 538, 539, 549, 553, 554], "backward": [449, 488, 525, 526, 538, 543, 544, 546, 550], "badri": 477, "baichuan": 536, "baichuan2": 536, "balanc": [217, 413, 473, 475, 477, 481, 488, 541, 549, 552], "ban": 490, "bandit": 491, "bandwidth": [474, 477, 488, 538, 539, 549], "bar": [30, 211, 545], "bare": [494, 529, 534], "bart": 555, "base": [1, 3, 21, 32, 71, 90, 101, 105, 107, 117, 129, 135, 138, 143, 145, 146, 152, 153, 154, 161, 162, 169, 176, 183, 189, 191, 195, 200, 203, 207, 209, 211, 218, 225, 234, 236, 243, 244, 245, 271, 274, 278, 289, 305, 343, 372, 381, 387, 390, 392, 397, 409, 413, 417, 429, 439, 442, 443, 446, 448, 471, 472, 474, 475, 477, 478, 482, 484, 488, 489, 495, 496, 497, 502, 522, 525, 526, 530, 538, 539, 544, 545, 546, 549, 551, 552, 554, 555], "base_algorithm": 394, "base_config": [153, 155, 299, 301, 302, 303, 305, 438, 439, 442], "base_dir": 30, "base_model": 237, "base_tun": [155, 302, 438, 479, 481], "basecallback": 162, "baseconfig": [152, 153, 156, 299, 301, 302, 305, 438, 439, 442, 478, 481, 522], "basedataload": [200, 204, 206, 387], "basedatalod": [200, 387], "baselin": [455, 482, 551, 552], "baseline_model": [481, 482], "basemetr": [234, 262], "basemodel": [236, 301, 302, 305, 390, 481], "basepattern": [175, 177, 179], "baseprun": [180, 185, 187, 188], "basereg": 189, "basetransform": 225, "bash": 489, "basi": 544, "basic": [173, 175, 183, 195, 224, 270, 274, 277, 449, 473, 533, 538, 541, 551], "basicprun": [186, 188], "basictoken": 224, "basictunestrategi": 265, "batch": [1, 145, 200, 202, 203, 207, 208, 387, 418, 449, 477, 523, 525, 538, 543, 544, 546, 552, 553, 554, 555], "batch_decod": 489, "batch_idx": 526, "batch_sampl": [200, 202, 204, 387, 523], "batch_siz": [195, 200, 202, 204, 207, 208, 209, 210, 387, 418, 439, 452, 459, 477, 523, 526, 528, 532, 538, 546], "batchmatmul": [113, 368], "batchmatmulv2": [113, 368], "batchnorm": [20, 47, 52, 106, 319, 324, 492, 552], "batchnormalizationoper": 20, "batchsampl": [207, 387], "batchtospacend": [43, 315], "bayesian": [195, 270, 544], "bayesianoptim": 266, "bayesiantunestrategi": 266, "bbox": [230, 492, 537], "bboxes_labels_scor": 492, "beam": [227, 537], "becaus": [140, 179, 184, 225, 396, 480, 488, 492, 546, 552, 553, 554], "becom": [477, 488, 495, 528, 544, 549], "been": [3, 140, 170, 176, 183, 227, 391, 396, 404, 412, 413, 417, 474, 477, 481, 488, 497, 522, 538, 539, 549, 552], "befor": [92, 94, 149, 175, 180, 182, 187, 188, 190, 195, 209, 288, 357, 359, 420, 431, 441, 446, 448, 472, 477, 488, 489, 491, 497, 523, 525, 526, 529, 530, 538, 546, 549, 551, 554], "begin": [101, 181, 182, 187, 476, 522, 523, 525, 538, 544, 554], "behavior": [140, 396, 490, 495, 496, 497, 
530, 540, 549, 554], "being": [135, 230, 497], "beit": 555, "belong": [211, 239, 546], "below": [40, 51, 59, 154, 195, 234, 262, 312, 323, 331, 470, 472, 473, 475, 477, 478, 481, 482, 488, 489, 491, 492, 494, 495, 496, 497, 523, 526, 531, 532, 537, 541, 542, 543, 544, 546, 549, 551, 552, 554], "benchmark": [155, 195, 226, 245, 466, 470, 489, 494, 501, 502, 531, 540, 554, 555], "benchmark_with_raw_cmd": 151, "benchmarkconf": 538, "benchmarkconfig": [151, 195, 520, 538], "benefici": 521, "benefit": [531, 543], "bert": [173, 195, 205, 208, 209, 225, 494, 537, 539, 544, 553, 555], "bert_dataset": 215, "bertattent": 173, "besid": [477, 488, 495, 549], "best": [162, 225, 271, 433, 477, 478, 479, 480, 482, 490, 522, 534, 542, 546, 549, 552, 553, 554], "best_clip_ratio": 433, "best_configur": 465, "best_model": [162, 465, 474, 479, 480, 481, 482, 549], "best_model_weight": 465, "best_scor": 162, "beta": [169, 425], "better": [81, 195, 198, 199, 235, 262, 352, 449, 474, 477, 488, 522, 525, 537, 539, 544, 545, 546, 549, 551, 552, 554], "between": [3, 29, 128, 150, 186, 195, 225, 231, 234, 257, 261, 380, 406, 413, 466, 472, 473, 477, 478, 481, 488, 492, 494, 495, 496, 497, 521, 533, 537, 538, 541, 546, 549, 550, 551, 552, 553, 554], "bf16": [31, 72, 134, 195, 278, 344, 399, 401, 406, 439, 472, 481, 495, 496, 530, 538, 548, 554, 555], "bf16_convert": [37, 136, 309], "bf16_op": [33, 35, 92, 306, 307, 357, 496, 548], "bf16convert": [35, 307, 548], "bf16modul": 134, "bf16modulewrapp": 134, "bf16wrapper": 548, "bfloat16": [30, 429, 439, 474, 494, 539, 548], "bfloat16fp16": 472, "bi": [231, 232], "bia": [128, 195, 380, 403, 423, 429, 549], "bias_constraint": [292, 293, 294, 298], "bias_correct": [130, 382], "bias_initi": [292, 293, 294, 298], "bias_regular": [292, 293, 294, 298], "biasadd": [38, 44, 49, 51, 94, 310, 316, 321, 323, 359, 530], "biascorrect": [128, 380], "bibtex": 535, "bicub": [225, 553], "big": [135, 477, 549], "bigscienc": [475, 536, 552], "bilibili": 545, "bilinear": [221, 225, 553], "bilinearimagenet": 553, "bilinearimagenettransform": 221, "bilingu": 227, "billion": 544, "bilstm": 555, "bin": [3, 140, 141, 391, 396, 398, 466, 529], "binari": [8, 133, 234, 385, 529, 537, 550], "binary_op": 16, "binarydirect8bitoper": 8, "binaryoper": 8, "bind": [151, 484, 489], "bio": 555, "bit": [29, 31, 145, 195, 280, 413, 421, 426, 429, 433, 439, 444, 452, 471, 472, 473, 474, 477, 478, 488, 489, 497, 521, 525, 539, 541, 545, 546, 549, 552], "bit_pack": 447, "bita": [473, 541], "bitnami": 545, "bitpack": 424, "bitwidth": 471, "bk3": 3, "black": 554, "black_nod": 2, "blendcnn": 555, "bleu": [228, 233, 537, 544], "bleu_hook": 228, "bleu_scor": 228, "bleu_token": 227, "bleu_util": 233, "blob": [3, 135, 177, 178, 227, 228, 231, 232, 234, 262, 538], "blob_siz": 31, "block": [31, 143, 145, 169, 179, 182, 187, 192, 245, 281, 417, 433, 448, 473, 477, 533, 541, 542, 544, 549, 554], "block_list": [145, 433], "block_mask": [169, 183], "block_nam": 448, "block_num": [145, 433], "block_pattern": [143, 417], "block_prefix": [145, 433], "block_siz": [179, 404, 439, 477, 549], "blockfallbacktuningsampl": 277, "blocklist": [439, 472], "blockmaskcriterion": 169, "blocksiz": [31, 439, 452, 473, 541], "blockwis": 552, "blockwise_over_matmul_gemm_conv": 192, "blog": [439, 494, 545], "bloom": [475, 488, 494, 536, 544, 552], "bloomz": [475, 552], "blue": [477, 547], "bmm": [472, 477, 488, 549], "bn": 555, "bnb": [477, 549], "bnb_nf4": [439, 448], "bo": 195, "bodi": 490, "bool": [1, 29, 30, 31, 126, 133, 140, 144, 145, 153, 
161, 175, 195, 209, 211, 221, 225, 228, 234, 277, 281, 286, 299, 303, 385, 396, 398, 406, 409, 412, 413, 418, 420, 423, 425, 426, 427, 431, 433, 439, 442, 446, 448, 452, 459, 462, 465, 466, 477, 478, 523, 537, 553], "bool_val": 90, "boolean": [1, 409], "boost": [474, 494, 539, 544, 545], "booster": 525, "border": 553, "both": [133, 195, 385, 443, 476, 477, 479, 484, 488, 489, 490, 495, 525, 538, 544, 546, 549, 552, 554], "bottleneck": [477, 488, 549], "boudoukh": 544, "bound": [59, 151, 225, 230, 266, 331, 413, 537, 553], "boundari": [225, 553], "bowl": 232, "box": [225, 230, 234, 492, 537, 553, 554], "branch": [133, 385, 491], "brand": 535, "break": [94, 359, 526, 538], "breakthrough": [473, 541], "breviti": [227, 228, 537], "bridg": [495, 496, 533], "briefli": [488, 538, 552], "bright": 553, "bring": [488, 552], "broad": [470, 494, 533], "broadcast": [225, 526, 553], "broadcast_optimizer_st": 526, "broadcast_paramet": 526, "bronco": 232, "brought": [488, 540, 546], "buffer": [140, 396, 452], "bug": [491, 494], "build": [1, 29, 31, 100, 146, 147, 149, 150, 154, 238, 240, 242, 243, 244, 390, 413, 481, 489, 495, 496, 497, 534, 540, 545, 554], "build_captured_dataload": 413, "build_slave_faker_model": 280, "build_torch_model": 474, "built": [60, 165, 184, 196, 197, 201, 205, 209, 210, 215, 217, 219, 220, 221, 222, 225, 234, 237, 245, 262, 301, 305, 332, 449, 481, 482, 495, 526, 538, 542, 546, 551, 553, 554], "builtin": [140, 396, 526], "busi": 545, "button": [491, 533], "bypass_reshap": [52, 53, 324, 325], "byte": [140, 396, 413], "byte_arrai": [140, 396], "bytes_or_buff": 452, "bytesio": [140, 396], "c": [3, 59, 154, 211, 331, 466, 477, 483, 488, 529, 549, 554], "c1": [59, 331], "c1c2": [59, 331], "c2": [59, 331], "c6a": 555, "c6g": 555, "c6i": 555, "c_": [477, 549], "c_out": 477, "cach": [209, 477, 488, 494, 529, 546, 549], "cache_dir": [141, 398, 448], "cache_kl": 1, "cache_minmax": 1, "caffenet": 555, "cal_scal": 413, "calcul": [30, 45, 145, 153, 169, 177, 192, 195, 212, 213, 231, 232, 271, 317, 387, 413, 425, 453, 466, 472, 475, 477, 481, 488, 521, 537, 538, 542, 544, 546, 549, 551, 552, 554], "calculate_md5": 211, "calculate_ms": 466, "calculate_quant_min_max": 145, "calculate_scale_zp": 30, "calib": [145, 292, 293, 294, 297, 298, 496], "calib_cfg": 1, "calib_data": [1, 139], "calib_dataload": [262, 284, 301, 302, 305, 479, 480, 481, 482, 492, 523, 532, 537, 538, 546, 547, 548, 549], "calib_func": [33, 145, 262, 284, 301, 302, 305, 306, 433, 472, 481, 494, 548], "calib_iter": [284, 288, 301, 302, 305, 481, 496], "calib_method": 3, "calib_model": 1, "calib_num": 413, "calib_registri": 3, "calib_tensor": 1, "calibcollector": 1, "calibdata": 1, "calibr": [1, 4, 31, 34, 74, 75, 125, 145, 151, 195, 198, 199, 262, 285, 301, 305, 346, 412, 413, 418, 438, 442, 461, 470, 472, 476, 477, 478, 481, 488, 494, 495, 497, 532, 533, 544, 546, 549, 554], "calibration_data": [84, 92, 357], "calibration_sampling_s": [195, 538], "calibratorbas": 3, "call": [1, 29, 140, 146, 161, 173, 181, 182, 187, 230, 396, 413, 446, 481, 484, 488, 489, 492, 495, 523, 532, 544, 546, 552, 553], "call_count": 161, "call_on": 151, "callabl": [140, 153, 195, 232, 284, 301, 302, 305, 396, 412, 437, 438, 439, 442, 443, 448, 449, 478, 481, 522, 523], "callback": [449, 455, 492, 538, 543, 544, 546], "callbacks_list": 449, "camembert": 555, "can": [29, 30, 31, 128, 135, 140, 150, 173, 174, 175, 180, 188, 190, 195, 198, 199, 208, 211, 225, 230, 232, 234, 235, 245, 262, 380, 396, 413, 442, 448, 449, 466, 472, 473, 474, 475, 476, 
477, 478, 479, 480, 481, 482, 484, 488, 489, 492, 494, 495, 496, 497, 520, 522, 523, 525, 526, 528, 530, 533, 534, 536, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 549, 551, 552, 553, 554, 555], "candid": 406, "cannot": [230, 477, 492, 522, 529, 549], "cap": 494, "cap_s8_1_11": 497, "cap_s8_1_11_conv1d": 497, "capabl": [1, 32, 278, 289, 472, 474, 477, 488, 489, 496, 497, 520, 530, 538, 539, 544, 546, 554], "capac": [477, 488, 549, 551], "captur": [391, 413, 466, 476, 477], "captureddataload": 413, "captureoutputtofil": [391, 466], "card": [477, 544], "carri": [412, 413, 544], "cascad": 534, "case": [30, 55, 59, 140, 151, 154, 195, 208, 224, 225, 245, 327, 331, 396, 441, 443, 470, 477, 478, 481, 488, 497, 522, 523, 525, 526, 530, 532, 537, 542, 544, 545, 546, 549, 550, 552, 553, 555], "cast": [30, 36, 308, 548, 553], "cast_tensor": 30, "castonnxtransform": 225, "castpytorchtransform": 225, "casttftransform": 225, "cat": [492, 528], "categor": 537, "categori": [211, 229, 520], "category_id_set": 230, "caus": [192, 477, 488, 496, 523, 544, 549], "causal": [431, 441, 544], "cce5ff": 554, "cd": [526, 534], "cdot": [488, 552], "ce": [163, 195, 538], "center": [225, 493, 494, 534, 553, 554], "centercrop": 553, "centercroptftransform": 225, "centercroptransform": 225, "cento": 534, "central_fract": [221, 553], "cern": 545, "certain": [140, 194, 396, 482, 551], "certif": 491, "cfg": [56, 57, 145, 278, 328, 329, 412, 413, 417, 455, 496, 497], "cfg_filter": 455, "cfg_from_fil": 466, "cfg_preprocess": 455, "cfg_to_qconfig": [413, 417], "challeng": [473, 477, 541, 547], "chang": [29, 81, 128, 150, 181, 182, 184, 185, 187, 190, 195, 269, 352, 380, 413, 442, 466, 478, 491, 496, 523, 526, 529, 535, 538, 544, 553, 554], "channel": [29, 30, 31, 128, 149, 150, 171, 174, 179, 195, 221, 225, 380, 413, 433, 462, 472, 473, 475, 477, 494, 495, 497, 533, 534, 541, 544, 549, 553, 555], "channel_axi": [98, 147, 150], "channels_last": 221, "channelx1": [195, 544], "chapter": 496, "characterist": 490, "chart": [488, 546, 551], "chat": [475, 484, 489, 536, 552], "chatbot": [473, 541, 545], "chatglm2": 536, "chatglm3": 536, "check": [1, 31, 52, 53, 90, 133, 145, 192, 201, 211, 324, 325, 385, 391, 406, 413, 417, 427, 442, 446, 448, 457, 465, 466, 481, 483, 494, 526, 534, 550], "check_cfg_and_qconfig": [145, 413, 417], "check_config": 192, "check_dataload": 201, "check_integr": 211, "check_key_exist": 466, "check_key_valid": 192, "check_model": 457, "check_mx_vers": 1, "checknumer": [65, 337], "checkout": 491, "checkpoint": [140, 243, 390, 396, 431, 441, 465, 540], "checkpoint_dir": [431, 441, 465], "checkpoint_sess": [243, 390], "checksum": 211, "chees": 522, "cheeseshopaddress": 522, "chen": 535, "cheng": [477, 488], "child": [59, 141, 331, 398, 420], "children": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 21, 22, 23, 25, 26, 141, 398], "chines": 545, "choic": [209, 477, 481, 488, 495, 537, 552], "choos": [211, 433, 462, 478, 488, 533, 545, 550, 552, 554], "chosen": [472, 496, 521], "chunk": [225, 553], "chunk_siz": 211, "ci": 491, "cifar": [211, 555], "cifar10": 211, "cifar100": 211, "circumst": 490, "ckpt": [133, 235, 243, 262, 385, 390, 555], "cl": [141, 146, 161, 163, 165, 175, 180, 188, 189, 190, 211, 218, 225, 234, 245, 274, 391, 398, 448, 466, 522], "claim": 535, "clamp_": [488, 552], "clarifi": 490, "class": [101, 138, 145, 170, 176, 183, 215, 235, 262, 285, 287, 291, 305, 397, 400, 448, 477, 478, 479, 481, 482, 492, 496, 522, 523, 537, 538, 542, 543, 546, 550, 554], "class_1": 211, "class_n": 
211, "classdef": 554, "classic": [478, 554], "classif": [188, 209, 225, 234, 528, 537, 544, 545], "classifi": [173, 192, 209, 234], "classificationmnli": 555, "classificationmrpc": 555, "classificationqnli": 555, "classificationqqp": 555, "classificationsst": 555, "classifierheadsearch": 173, "classifierheadsearchertf": 173, "classregist": 280, "clean": [224, 398, 538], "clean_module_weight": 398, "clean_weight": 398, "clear": [207, 495, 530, 537, 538], "click": [494, 533, 534, 545], "client": [161, 494], "clip": [31, 413, 433, 477, 488, 549, 552], "clip_grad_norm_": 538, "clm": 544, "clone": [491, 534], "close": [128, 150, 380, 521], "cloud": [473, 491, 494, 541, 545], "cluster": 554, "cmd": [151, 534], "cnn": 555, "cnt": 538, "co": 544, "coarsest": [488, 552], "coco": [210, 217, 230, 234, 537, 544], "coco_dataset": 215, "coco_filt": 219, "coco_label_map": 233, "coco_tool": 233, "cocoev": 230, "cocoevalwrapp": 230, "cocomap": 537, "cocomapv2": [234, 537], "coconpi": 210, "cocoraw": 210, "cocorecorddataset": 210, "cocowrapp": 230, "code": [140, 173, 198, 199, 262, 396, 439, 449, 470, 474, 477, 479, 489, 492, 494, 495, 496, 497, 525, 526, 532, 533, 535, 537, 542, 543, 544, 545, 546, 551, 552, 553], "codebert": 555, "codec": [140, 396], "codenam": [474, 536, 539], "coder": [533, 545], "coeff": 189, "coeffici": [40, 189, 312, 544], "cola": [209, 537, 555], "collabor": [491, 494], "collat": [133, 145, 385], "collate_fn": [200, 202, 203, 204, 387, 523], "collate_pr": 30, "collate_result": 145, "collate_tf_pr": [133, 385], "collate_torch_pr": 145, "collctor": 3, "collect": [1, 3, 30, 145, 153, 154, 223, 225, 232, 234, 391, 403, 412, 413, 416, 419, 421, 430, 453, 466, 481, 488, 496, 546, 554], "collect_layer_histogram": 460, "collect_layer_input": 192, "collect_weight_info": 145, "collector": [1, 135, 453], "collectorbas": 1, "collecttransform": 225, "color": [477, 544, 547], "colorjitt": 553, "column": [466, 477, 488, 549, 552], "column_map": 466, "columnwis": [50, 322], "com": [3, 135, 177, 178, 188, 209, 227, 228, 231, 232, 234, 262, 439, 490, 522, 534, 535, 538, 550, 551, 555], "comb": 525, "combin": [1, 145, 195, 198, 199, 221, 235, 262, 391, 417, 449, 466, 477, 480, 492, 494, 495, 533, 538, 543, 545, 546, 548, 549, 553, 554], "combine_cap": 1, "combine_histogram": [391, 466], "come": [140, 396, 473, 536, 541, 554], "comma": 418, "command": [151, 154, 449, 483, 484, 526], "command_prefix": 154, "comment": [138, 397, 477, 483, 490, 496, 537, 538, 549, 553], "commentsbalancedor": 555, "commit": [478, 481, 490, 491], "common": [124, 138, 140, 195, 223, 226, 299, 301, 302, 303, 305, 379, 396, 397, 437, 438, 439, 442, 448, 455, 475, 479, 480, 481, 490, 522, 525, 526, 528, 531, 532, 534, 538, 546, 552], "commonli": 544, "commun": 490, "comoress": 462, "compact": [538, 544], "compar": [135, 234, 466, 473, 476, 477, 482, 488, 525, 537, 541, 544, 546, 549, 551, 554], "compare_kei": 466, "compare_label": [234, 262, 537], "compare_object": 466, "compare_weight": 135, "comparison": [466, 549, 550], "compat": [133, 243, 385, 390, 491, 526, 540, 544, 550], "compatible_format": [138, 397], "compil": [227, 471, 476, 489, 538], "complaint": 490, "complement": [474, 544], "complet": [278, 472, 495, 496, 544, 546, 554, 555], "complex": [156, 474, 522, 538, 539, 544], "complex_attr": 156, "complextfmap": 522, "compli": 554, "compon": [162, 284, 482, 492, 522, 526, 534, 535, 543, 544], "compos": [152, 225, 455, 553], "composableconfig": [152, 522], "composetransform": 225, "comprehens": [470, 478, 481, 538], 
"compress": [226, 245, 420, 439, 449, 462, 468, 470, 472, 475, 477, 478, 480, 482, 488, 489, 494, 501, 531, 533, 538, 542, 543, 544, 545, 546, 552, 555], "compress_bit": 444, "compressed_model": 549, "compression_dim": [429, 462, 549], "compression_dtyp": [429, 462, 549], "compression_manag": [195, 449, 492, 525, 538, 543, 544, 546], "compressionmanag": 449, "compressor": [1, 151, 154, 162, 165, 177, 178, 188, 195, 221, 222, 225, 226, 233, 234, 235, 239, 246, 262, 270, 276, 290, 302, 303, 304, 305, 389, 392, 394, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 450, 452, 456, 468, 470, 472, 473, 474, 475, 476, 477, 478, 479, 481, 482, 483, 485, 488, 489, 491, 495, 496, 497, 520, 521, 522, 524, 525, 526, 527, 528, 529, 530, 531, 532, 535, 536, 539, 540, 541, 542, 543, 544, 545, 546, 549, 550, 552, 553, 554, 555], "comput": [30, 31, 90, 135, 145, 227, 228, 231, 232, 234, 413, 417, 425, 449, 466, 472, 473, 474, 476, 477, 488, 521, 523, 525, 528, 537, 538, 539, 541, 544, 545, 546, 549, 552, 554], "compute_bleu": 228, "compute_const_folding_using_tf": 90, "compute_dtyp": 452, "compute_error": 135, "compute_spars": 466, "computemetr": 230, "concat": [16, 132, 384], "concat_gener": 225, "concaten": 413, "concatoper": 9, "concatv2": [109, 119, 132, 364, 374, 384, 530], "concept": [470, 532, 551], "concret": [133, 385, 390, 554], "conda": [529, 550], "condit": [70, 153, 209, 210, 211, 225, 263, 265, 342, 448, 482, 535, 553, 554], "conduct": [470, 492, 538, 554], "conf": [146, 151, 162, 195, 198, 199, 235, 245, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 278, 281, 449, 492, 497, 520, 532, 538, 539, 540, 543, 546, 547, 548, 549, 551, 552, 554], "confer": [135, 488, 544, 552], "confid": 145, "confidence_batch": [145, 554], "confidenti": 490, "config": [1, 31, 100, 101, 103, 107, 145, 151, 152, 153, 156, 160, 161, 169, 170, 171, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 198, 199, 209, 226, 235, 257, 261, 278, 279, 280, 281, 284, 288, 289, 300, 301, 302, 304, 305, 389, 406, 409, 412, 413, 417, 424, 427, 431, 438, 440, 442, 448, 449, 452, 455, 465, 466, 471, 472, 474, 476, 477, 478, 479, 496, 497, 501, 502, 520, 522, 523, 525, 526, 528, 533, 537, 538, 539, 540, 543, 544, 546, 548, 549, 551, 554], "config1": 153, "config2": 153, "config_file_path": 188, "config_inst": 151, "config_list": [152, 153], "config_map": [160, 427], "config_name_map": 160, "config_quantizable_lay": 103, "config_set": [153, 474, 479, 480, 481, 482], "config_sourc": 153, "configload": 153, "configmappingtyp": 427, "configproto": 261, "configregistri": [152, 160], "configs_map": [305, 399, 437, 442], "configset": 153, "configur": [1, 103, 134, 145, 151, 152, 153, 154, 161, 192, 195, 198, 199, 201, 211, 214, 234, 235, 245, 262, 271, 280, 301, 305, 406, 408, 409, 411, 413, 417, 422, 427, 437, 439, 442, 448, 449, 454, 455, 459, 464, 465, 478, 482, 484, 489, 492, 495, 496, 538, 542, 544, 546, 549, 550, 552, 553, 554, 555], "confirm": 496, "conflict": [90, 522, 529], "connect": [169, 544, 551], "consecut": [171, 173, 174, 544], "conserv": [195, 270], "conservativetunestrategi": 267, "consid": [31, 133, 234, 385, 490, 521, 522, 554], "consider": [480, 552], "consist": [41, 48, 313, 320, 477, 491, 492, 522, 552, 554], "consolid": 534, "const": [41, 42, 48, 49, 52, 53, 55, 67, 83, 313, 314, 320, 321, 324, 325, 327, 339, 354, 439], "const_node_valu": 90, "constant": [90, 158, 266, 276, 388, 447, 459, 460, 522, 523, 530, 553], "constant_valu": 225, "constfold": 530, 
"constrain": 547, "constraint": [195, 538, 544, 551, 554], "construct": [140, 209, 212, 213, 216, 223, 234, 239, 243, 280, 387, 389, 390, 396, 455, 482, 490, 496, 497, 526, 544, 554], "construct_function_from_graph_def": [133, 385], "consum": [482, 523], "consumpt": [473, 477, 488, 541, 552], "contact": [490, 550, 551], "contain": [1, 52, 53, 55, 59, 124, 133, 135, 140, 145, 151, 162, 170, 173, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 192, 195, 198, 199, 209, 210, 211, 230, 231, 232, 235, 262, 324, 325, 327, 331, 379, 385, 396, 406, 409, 412, 413, 417, 425, 429, 449, 455, 459, 465, 470, 477, 481, 494, 521, 529, 533, 537, 544, 549, 554], "content": 489, "content_fold": 216, "context": [1, 466, 477, 488, 549], "contextu": 544, "contigu": 145, "continu": [178, 230, 477, 536, 538, 544], "contract": [43, 315], "contrast": [175, 553], "contrib": [3, 226], "contribut": [490, 494, 544], "control": [146, 195, 492, 538, 544, 554], "conv": [16, 43, 47, 51, 56, 57, 58, 77, 94, 149, 195, 315, 319, 323, 328, 329, 330, 348, 359, 492, 496, 497, 538, 544, 546, 554], "conv1": [195, 479, 546], "conv1d": [194, 448, 478, 497], "conv1d_relu": 528, "conv2d": [38, 44, 45, 46, 50, 51, 56, 110, 120, 128, 294, 295, 298, 303, 310, 316, 317, 318, 322, 323, 328, 365, 375, 380, 413, 472, 496, 497, 528, 530, 552, 554], "conv2d_config": 479, "conv2d_relu": 528, "conv2dbackpropinput": [111, 366], "conv3d": [56, 110, 328, 365], "conv3dbackpropinputv2": [111, 366], "conveni": [478, 538], "convent": [211, 230, 473, 491, 541], "convers": [39, 40, 195, 311, 312, 406, 474, 475, 488, 489, 491, 495, 496, 538, 539, 546, 548, 552], "convert": [1, 30, 33, 34, 35, 38, 39, 40, 41, 42, 51, 59, 71, 84, 85, 87, 88, 89, 90, 116, 121, 125, 127, 134, 145, 173, 195, 209, 221, 224, 225, 230, 278, 281, 283, 288, 289, 306, 307, 310, 311, 312, 313, 314, 323, 331, 343, 355, 371, 376, 392, 399, 406, 437, 442, 448, 450, 458, 471, 472, 473, 474, 475, 476, 477, 478, 480, 481, 484, 488, 492, 494, 496, 531, 538, 539, 546, 548, 549, 552, 553, 554], "convert_add_to_biasadd": [61, 333], "convert_bf16": 496, "convert_by_vocab": 224, "convert_examples_to_featur": [209, 225], "convert_layout": [61, 333], "convert_leakyrelu": [61, 333], "convert_nan_to_random": [61, 333], "convert_placeholder_to_const": [61, 333], "convert_tensorflow_tensor_to_onnx": 90, "convert_to_unicod": 224, "convertaddtobiasaddoptim": [38, 310], "converted_model": [195, 235, 538, 539], "convertlayoutoptim": [39, 311], "convertleakyreluoptim": [40, 312], "convertnantorandom": [41, 313], "convertplaceholdertoconst": [42, 314], "convolut": 545, "convoper": 10, "cooper": [474, 534, 539, 545], "coordin": [225, 266, 553, 554], "copi": [195, 466, 523], "copyreg": [138, 397], "copyright": [491, 535], "core": [151, 154, 195, 285, 407, 424, 477, 483, 484, 489, 491, 494, 520, 534, 535, 555], "core_id": 151, "core_list": [151, 154], "core_list_per_inst": 154, "cores_per_inst": [151, 195, 520, 538], "corner": [225, 491, 495, 497, 553], "corpor": 535, "correct": [128, 150, 192, 195, 230, 231, 232, 234, 380, 490], "correspond": [3, 135, 173, 184, 195, 209, 227, 230, 234, 412, 413, 417, 427, 455, 466, 472, 478, 488, 495, 530, 538, 544, 546, 551, 554], "cost": [472, 473, 477, 488, 541, 546, 549], "could": [140, 149, 175, 198, 199, 211, 235, 262, 396, 413, 449, 470, 474, 476, 477, 479, 481, 488, 490, 492, 525, 529, 538, 543, 544, 546, 549, 552], "count": [1, 477, 488, 549], "counter": 90, "coupl": 208, "cover": [491, 528, 532, 545], "coverag": 491, "cowork": [431, 441], 
"cpu": [76, 77, 78, 79, 139, 140, 145, 154, 161, 195, 347, 348, 349, 350, 391, 396, 398, 401, 413, 420, 429, 431, 433, 441, 443, 448, 462, 466, 474, 476, 478, 481, 483, 496, 520, 521, 533, 538, 539, 545, 546, 547, 548, 549], "cpu_acceler": 443, "cpu_execution_tim": 251, "cpu_index": 154, "cpu_rang": 154, "cpuexecutionprovid": [2, 28, 29, 31, 539, 546], "cpufreq": 522, "cpuinfo": [161, 391, 466], "craft": [473, 541], "crbug": 522, "creat": [1, 3, 90, 138, 156, 195, 200, 209, 211, 230, 257, 261, 278, 280, 281, 387, 397, 406, 409, 452, 455, 470, 472, 488, 490, 495, 540, 544, 546, 551, 554], "create_data_exampl": 1, "create_dataload": 455, "create_dataset": 455, "create_eval_func": 455, "create_obj_from_config": 460, "create_onnx_config": 257, "create_quant_spec_from_config": 409, "create_tf_config": 261, "create_train_func": 455, "create_xiq_quantizer_from_pt2e_config": 409, "criteria": [170, 195, 482, 492, 552], "criterion": [135, 162, 164, 165, 169, 175, 180, 181, 182, 184, 187, 191, 195, 525, 538, 544, 546, 552, 554], "criterion_class": [169, 191], "criterion_conf": 195, "criterion_registri": 163, "criterion_typ": 163, "critet": 163, "critic": [184, 420, 481, 490], "crop": [221, 225, 553], "crop_pad": 225, "crop_ratio": 216, "cropres": 553, "cropresizetftransform": 225, "cropresizetransform": 225, "croptoboundingbox": [225, 553], "cross": [165, 234, 483, 532, 554], "cross_memori": 483, "crossentropyloss": [163, 195, 538], "crowd": 230, "crucial": [476, 544], "cs412": 3, "cse": [59, 83, 331, 354], "csv": 466, "ctx": 1, "cube": [195, 544], "cuda": [140, 192, 396, 413, 425, 443, 448, 478, 539, 544, 546], "cuda_acceler": 443, "cudaexecutionprovid": [539, 546], "current": [1, 55, 89, 154, 169, 170, 173, 176, 180, 183, 195, 266, 272, 327, 413, 433, 441, 448, 466, 476, 477, 478, 482, 492, 494, 495, 497, 521, 522, 526, 530, 531, 538, 542, 544, 546, 549, 552, 554], "current_pattern": 173, "current_sparsity_ratio": 180, "curv": 537, "custom": [100, 138, 145, 151, 234, 245, 292, 293, 294, 295, 297, 298, 397, 433, 465, 472, 478, 482, 488, 494, 520, 525, 544, 545, 549, 551], "custom_metr": 532, "custom_tune_config": [474, 479, 480, 481, 482], "customis": [523, 537], "customized_msg": [161, 391, 466], "cv": [195, 472, 476, 478, 481, 544], "cv2": 221, "cvf": [488, 552], "d": [59, 145, 331, 417, 532, 544], "d1": [59, 331, 466], "d18": 555, "d2": 466, "d_": 195, "d_conf": [195, 525, 538, 543], "dai": 554, "damp_perc": 452, "dampen": 31, "darvish": [473, 541], "data": [1, 29, 30, 31, 41, 90, 125, 133, 135, 140, 145, 149, 161, 173, 195, 198, 199, 226, 235, 256, 260, 262, 266, 278, 280, 283, 301, 305, 313, 385, 388, 391, 396, 403, 404, 409, 413, 418, 433, 448, 449, 452, 455, 459, 466, 470, 471, 472, 473, 474, 476, 477, 478, 481, 488, 494, 495, 521, 523, 526, 530, 534, 537, 538, 539, 541, 546, 548, 549, 550, 551, 552, 553, 554], "data_dir": 209, "data_format": [211, 221, 292, 294, 297, 298], "data_it": [1, 173], "data_load": [33, 34, 306, 495], "data_path": 214, "data_sourc": [207, 455], "data_typ": [280, 418], "data_x": 1, "databas": 211, "databrick": [475, 536, 552], "datafunc": 211, "dataiterload": 1, "dataload": [1, 2, 29, 31, 125, 126, 135, 145, 151, 170, 171, 173, 188, 192, 198, 199, 209, 220, 256, 260, 262, 283, 286, 387, 413, 418, 420, 433, 448, 449, 455, 470, 481, 492, 495, 496, 525, 526, 532, 538, 539, 543, 544, 546, 547, 548, 549, 552, 553], "dataloader_cfg": 455, "dataloaderwrap": 1, "datalod": 413, "dataset": [125, 126, 145, 188, 198, 199, 200, 202, 203, 204, 206, 207, 208, 220, 230, 231, 232, 
234, 235, 262, 283, 286, 387, 418, 448, 449, 452, 455, 475, 477, 479, 480, 481, 488, 495, 496, 523, 526, 537, 538, 544, 546, 549, 550, 552, 554, 555], "dataset_format": 211, "dataset_nam": 418, "dataset_registri": 211, "dataset_typ": 211, "datatyp": [90, 145, 548, 554, 555], "date": [477, 549], "datetim": 195, "dbox": 492, "dco": 491, "ddr5": 555, "deal": 449, "deberta": 555, "debug": [281, 463, 554], "debug_stripp": 530, "dec": 545, "decid": [101, 195, 228, 481, 483, 495, 496, 538, 546, 550, 554], "decim": 466, "decis": [522, 537], "declar": 522, "decod": [140, 221, 227, 396, 452, 537, 553], "decode_singl": 492, "decodeimag": 553, "decompos": [52, 53, 324, 325], "decor": [3, 21, 52, 53, 55, 133, 146, 152, 161, 165, 175, 180, 188, 190, 225, 234, 245, 274, 324, 325, 327, 391, 413, 446, 448, 466, 492, 495, 554], "decorator_metr": 234, "decreas": [83, 354, 544], "dedic": 195, "dedicated_qdq_pair": [28, 195, 546], "deem": 490, "deep": [391, 466, 470, 473, 474, 478, 481, 488, 494, 495, 496, 523, 533, 534, 538, 539, 541, 545, 546, 554], "deep_get": [245, 391, 466], "deep_set": [245, 466], "deepcopi": 195, "deepen": 545, "deepst": 195, "def": [52, 53, 55, 153, 198, 199, 262, 281, 324, 325, 327, 391, 448, 449, 474, 475, 478, 479, 481, 482, 488, 492, 495, 522, 523, 525, 526, 537, 538, 546, 552, 554], "default": [3, 31, 82, 133, 140, 145, 152, 156, 161, 192, 195, 198, 199, 202, 209, 210, 211, 221, 225, 227, 234, 243, 280, 299, 303, 353, 385, 390, 391, 396, 398, 408, 409, 412, 413, 418, 425, 431, 433, 435, 437, 438, 439, 441, 442, 448, 452, 455, 458, 459, 462, 466, 472, 473, 474, 475, 477, 478, 481, 482, 483, 484, 489, 492, 495, 522, 523, 530, 533, 537, 538, 539, 541, 542, 544, 546, 548, 549, 551, 552, 553, 554], "default_alpha": 552, "default_col": [202, 387], "default_config": 192, "default_dtyp": 278, "default_opset_vers": 127, "default_sampl": [153, 482], "default_sq_alpha_arg": 303, "default_v": 156, "default_white_list": [152, 299, 303, 439], "default_workspac": 195, "defaultdataload": [202, 208], "defin": [71, 135, 156, 173, 175, 180, 181, 182, 187, 188, 189, 190, 195, 198, 199, 235, 262, 343, 449, 452, 465, 472, 479, 481, 482, 488, 490, 494, 495, 496, 523, 525, 530, 533, 537, 538, 543, 544, 546, 549, 551, 552, 554], "definit": [133, 142, 203, 207, 385, 429], "defult": [431, 441], "degrad": [525, 544, 554], "delet": 544, "delete_assign": 261, "deliv": [474, 477, 538, 545, 550], "demand": [477, 488, 549], "demo": [154, 478, 538], "democrat": 545, "demonstr": [473, 477, 482, 484, 494, 527, 541], "denot": [488, 552], "dens": [173, 190, 195, 295, 544, 555], "dense_shap": 213, "densenet": 555, "densiti": 554, "denver": 232, "depend": [1, 198, 199, 235, 262, 448, 449, 491, 494, 529, 530, 534, 546, 554], "deploi": [477, 482, 488, 491, 538, 549, 554], "deploy": [478, 524, 545, 547, 552], "deprec": [153, 522, 550, 553], "depth": [145, 417, 548], "depth_multipli": [294, 298], "depthwis": [294, 413], "depthwise_constraint": [294, 298], "depthwise_conv2d": 295, "depthwise_initi": [294, 298], "depthwise_regular": [294, 298], "depthwiseconv2d": 294, "depthwiseconv2dn": [50, 56, 110, 120, 322, 328, 365, 375, 530], "dequant": [29, 30, 31, 36, 73, 76, 77, 78, 79, 81, 135, 145, 308, 345, 347, 348, 349, 350, 352, 398, 413, 423, 429, 433, 466, 488, 492, 496, 552], "dequantize_cast_optim": [37, 309], "dequantize_data": 30, "dequantize_data_with_scale_zero": 30, "dequantize_per_channel": [488, 552], "dequantize_tensor": 466, "dequantize_weight": 466, "dequantizecastoptim": [36, 308], "dequantizelinear": 30, 
"deriv": [162, 169, 177, 178, 179, 185, 186, 187, 188, 189, 190, 191], "derogatori": 490, "desc": 538, "desc_act": 452, "descent": [173, 439, 477, 488, 494, 545], "describ": [495, 497, 522, 530], "descript": [1, 117, 281, 372, 472, 475, 492, 496, 497, 523, 552], "deseri": [140, 396], "design": [162, 208, 234, 267, 470, 476, 478, 481, 489, 494, 538, 544, 546], "desir": [195, 221, 225, 409, 475, 496, 497, 544, 552, 553], "despit": 482, "dest": 90, "destin": [133, 385, 457], "detach": [488, 552], "detail": [29, 149, 152, 173, 225, 413, 439, 466, 467, 470, 474, 475, 477, 478, 479, 480, 481, 488, 489, 490, 494, 496, 497, 521, 522, 530, 532, 536, 537, 538, 539, 544, 549, 550, 551, 552, 554], "detect": [143, 145, 161, 173, 195, 230, 234, 239, 417, 443, 448, 478, 491, 492, 544, 554], "detect_devic": 448, "detect_processor_type_based_on_hw": 161, "detection_box": [230, 538], "detection_boxes_list": 230, "detection_class": [230, 538], "detection_classes_list": 230, "detection_mask": 230, "detection_scor": [230, 538], "detection_scores_list": 230, "detection_typ": 230, "detectionboxes_precis": 234, "detectioncoco": 555, "detections_list": 230, "detector": 143, "determin": [169, 175, 180, 191, 195, 427, 461, 471, 476, 482, 484, 490, 492, 521, 544, 546], "determinist": 538, "dettmer": [477, 488, 549], "dev": 529, "develop": [135, 156, 245, 473, 474, 491, 492, 522, 534, 538, 539, 541, 544, 545, 548, 551], "deviat": [225, 553], "devic": [1, 64, 74, 75, 76, 77, 78, 79, 82, 92, 116, 121, 132, 139, 140, 145, 170, 173, 174, 188, 192, 195, 336, 346, 347, 348, 349, 350, 353, 357, 371, 376, 384, 396, 398, 401, 413, 417, 418, 420, 423, 425, 429, 431, 433, 441, 446, 448, 462, 474, 477, 478, 484, 488, 494, 496, 523, 538, 539, 544, 547, 549], "device_id": [140, 396], "device_map": 489, "device_nam": [443, 446], "device_synchron": 446, "devop": 491, "df": 90, "diag": 31, "diagnosi": 545, "diagon": [31, 477, 549], "diagram": [477, 496, 497, 554], "dict": [1, 31, 101, 133, 134, 135, 140, 145, 146, 151, 154, 160, 165, 169, 170, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 208, 229, 230, 231, 232, 234, 235, 243, 262, 277, 280, 281, 289, 299, 303, 305, 385, 390, 391, 396, 399, 404, 406, 411, 412, 413, 417, 418, 419, 420, 425, 427, 433, 435, 437, 438, 439, 442, 448, 449, 452, 453, 455, 457, 459, 465, 466, 477, 478, 479, 484, 489, 492, 496, 522, 528, 537, 544, 546, 552, 554], "dictionari": [52, 53, 55, 133, 134, 135, 140, 145, 194, 195, 224, 230, 324, 325, 327, 391, 396, 404, 413, 417, 431, 441, 448, 466], "differ": [128, 146, 150, 152, 154, 156, 157, 163, 184, 195, 203, 211, 225, 234, 243, 245, 380, 390, 445, 461, 472, 477, 478, 481, 484, 488, 490, 492, 496, 523, 526, 527, 531, 532, 533, 537, 538, 540, 542, 544, 546, 549, 552, 553, 554], "difficult": [475, 480, 482, 488, 495, 552], "difficulti": [475, 480, 488, 552], "diffus": [494, 545], "digit": [227, 466, 545], "dilat": [43, 315], "dilated_contract": [61, 333], "dilatedcontract": [43, 315], "dilation_r": [292, 294, 298], "dim": [225, 488, 552, 553], "dimens": [52, 53, 90, 202, 225, 324, 325, 387, 413, 477, 549, 553], "dir": [145, 209, 462, 465, 529, 532], "direct": [85, 179, 355, 478, 544], "direct8bit": 11, "direct8bitoper": 11, "direct_q8": 16, "directli": [171, 195, 208, 230, 234, 479, 489, 537, 538, 544, 546], "directori": [195, 210, 211, 243, 390, 408, 431, 441, 462, 465, 466, 494, 526, 529], "disabl": [133, 195, 391, 490, 548, 549, 552], "disable_al": 195, "disable_quanted_input": 452, 
"disable_random": [133, 391], "discard": 523, "discord": 494, "discourag": 544, "discov": 521, "discret": [3, 554], "discrimin": 555, "discuss": [478, 494], "dispatch": [496, 533], "displai": [195, 277, 278, 391, 466], "distanc": 466, "distil": [162, 195, 470, 494, 527, 531, 533, 543], "distil_loss": [195, 538], "distilbert": [209, 539, 545, 555], "distilgpt2": 555, "distillation_conf": 162, "distillation_criterion": [525, 538, 543], "distillationcallback": 162, "distillationconfig": [195, 449, 525, 538, 543], "distilroberta": 555, "distinct": [473, 541], "distort": 496, "distribut": [1, 3, 128, 150, 195, 200, 202, 203, 204, 207, 380, 387, 461, 470, 472, 477, 481, 488, 496, 523, 534, 545, 546, 549, 550, 552, 555], "distribute_calib_tensor": 1, "distributedoptim": 526, "distributedsampl": 526, "distutil": 529, "div_": [488, 552], "dive": [470, 530], "diverg": [3, 453, 461, 497, 521, 554], "divid": [488, 523, 552, 554], "divis": [31, 413], "dl": [235, 474, 532, 545, 555], "dlabel": 492, "dlrm": 234, "dmlexecutionprovid": 546, "dnnlexecutionprovid": [539, 546], "do": [162, 188, 195, 208, 209, 227, 230, 411, 459, 465, 472, 478, 490, 496, 522, 526, 530, 533, 537, 538, 544, 548, 550, 552, 554], "do_blockwis": [413, 439, 552], "do_constant_fold": 459, "do_lower_cas": [209, 224, 225, 553], "do_sampl": 489, "doc": [177, 178, 195, 234, 262, 277, 278, 281, 391, 443, 466, 494, 534, 537, 538], "doc_span_index": 225, "doc_strid": [225, 553], "doc_token": 225, "docker": 529, "docstr": [195, 281], "docstyl": 491, "document": [225, 281, 470, 474, 479, 480, 488, 489, 496, 497, 501, 526, 536, 539, 544, 550, 553], "doe": [227, 280, 301, 305, 476, 477, 481, 522, 523, 537, 538, 549, 550, 553], "doesn": [140, 225, 396, 474, 481, 495, 526, 538, 539], "dolli": [475, 536, 544, 552], "domain": [89, 90, 195, 544, 554], "don": [128, 150, 175, 195, 380, 475, 476, 492, 544, 554], "done": [488, 492, 525, 544, 546], "dong": 135, "dot": [391, 466, 474, 488, 539, 546], "dotdict": [195, 245, 466], "doubl": [438, 439, 477, 545], "double_qu": 448, "double_quant_bit": [439, 477], "double_quant_dtyp": [439, 477], "double_quant_group_s": [439, 477], "double_quant_typ": 448, "double_quant_use_sym": [439, 477], "dowload_hf_model": [141, 398, 448], "download": [141, 211, 398, 448, 494, 534, 550], "download_url": 211, "downstream": 544, "dpcpp": 489, "dq": [92, 94, 116, 357, 359, 371, 471, 476, 478, 488, 552], "draw": [523, 551, 554], "drive": 497, "driven": [245, 494, 531, 538, 542], "drop": [192, 230, 418, 474, 475, 477, 488, 491, 538, 539, 544, 549, 552, 555], "drop_last": [203, 207, 387], "dry_run": 526, "dscore": 492, "dtype": [30, 31, 87, 90, 195, 212, 213, 221, 225, 387, 401, 406, 409, 413, 423, 429, 433, 439, 448, 462, 474, 477, 478, 488, 489, 496, 497, 528, 530, 546, 549, 552, 553], "dtype_map": 30, "dtype_to_nam": 30, "duc": 555, "due": [154, 488, 497, 528, 544, 546, 552], "dummi": [44, 173, 212, 213, 234, 316, 387, 481, 537, 538], "dummy_biasadd": [61, 333], "dummy_dataset": 215, "dummy_dataset_v2": 215, "dummy_v2": [213, 387], "dummydataset": [212, 213, 387, 481], "dummydatasetv2": 387, "dump": [2, 138, 145, 151, 154, 397, 413, 417, 448, 459, 466, 481, 488, 491, 546], "dump_class_attr": 466, "dump_data_to_loc": 466, "dump_elapsed_tim": [161, 391, 466, 495], "dump_fp32": [131, 383], "dump_model_op_stat": [413, 417, 448], "dump_numa_info": 154, "dump_op_typ": 2, "dump_stats_path": [439, 472], "dump_tabl": 466, "dump_table_to_csv": 466, "duplic": [83, 93, 266, 354, 358], "durat": 551, "dure": [1, 138, 140, 175, 179, 190, 
195, 245, 396, 397, 413, 442, 448, 466, 475, 476, 478, 481, 488, 494, 496, 531, 538, 540, 542, 543, 544, 546, 552], "dyna": 195, "dynam": [140, 195, 200, 278, 387, 396, 409, 435, 437, 439, 459, 477, 478, 494, 495, 523, 528, 531, 533, 538, 545, 554, 555], "dynamic_ax": [195, 459, 528], "dynamic_length": 209, "dynamic_max_gap": [418, 439, 477], "dynamic_quant_export": 459, "dynamic_shap": 435, "dynamicquantconfig": [439, 471], "dynamo": [476, 478], "e": [140, 154, 175, 195, 396, 477, 484, 488, 489, 490, 491, 494, 527, 544, 549, 552, 554], "e16": 491, "e2m1": [473, 477, 541, 549], "e2m3": [473, 541], "e3m2": [473, 541], "e4m3": [439, 472, 473, 494, 541], "e5m2": [472, 473, 541], "e8m0": [473, 541], "each": [126, 133, 135, 140, 145, 154, 192, 195, 207, 211, 214, 221, 225, 228, 230, 231, 232, 271, 286, 387, 396, 412, 413, 417, 431, 433, 448, 466, 470, 477, 480, 483, 488, 491, 495, 496, 497, 522, 525, 530, 538, 542, 544, 547, 549, 551, 553, 554], "eager": [188, 261, 435, 471, 476, 477, 478, 526, 533, 546, 548], "earli": [195, 488, 538, 546, 554], "eas": [478, 481, 534, 545], "easi": [209, 476, 478, 481, 528, 538, 545, 546, 549, 551], "easier": 545, "easili": [245, 482, 488, 495, 542, 552], "econom": 490, "ecosystem": [494, 545], "edg": 553, "edit": 490, "edouard": [488, 552], "edu": [3, 211], "educ": 490, "effect": [187, 476, 477, 495, 544, 545], "effici": [29, 149, 413, 476, 484, 488, 494, 521, 523, 543, 544, 545, 549, 552, 554], "efficientnet": 555, "effort": 538, "eg": [145, 211, 433], "egsdcrb1": 555, "either": [140, 153, 195, 230, 396, 476, 481, 482, 484, 488, 543, 546], "elaps": [161, 391, 466], "electra": 555, "electron": 490, "elem_format": 404, "elem_typ": 90, "element": [31, 153, 177, 178, 192, 207, 230, 387, 404, 433, 448, 466, 473, 477, 488, 533, 537, 541, 544, 549, 552], "elementwise_over_al": 192, "elementwise_over_matmul_gemm_conv": 192, "elemformat": 404, "eleutherai": [475, 531, 536, 552], "elia": [477, 488, 549], "elimin": [51, 323], "ellipsi": [406, 443], "els": [1, 133, 195, 207, 245, 385, 433, 489, 549], "em": 555, "email": 494, "emb": 477, "embed": [145, 420, 521, 544], "embed_layernorm": 16, "embed_out": 544, "embedlayernorm": 12, "embedlayernormalizationoper": 12, "emerg": [473, 541, 552], "emit": 472, "emnlp": [494, 545], "emot": 555, "empathi": 490, "empir": [128, 150, 380], "empow": [473, 541], "empti": [141, 195, 398, 448, 472, 477, 478, 547], "emsp": 544, "emul": [478, 488, 538, 546], "en": [494, 534], "enabl": [31, 94, 135, 180, 195, 359, 409, 474, 476, 477, 481, 496, 497, 526, 539, 544, 545, 552, 554], "enable_act": 135, "enable_al": 195, "enable_auto_scal": [31, 549], "enable_bas": 195, "enable_eager_execut": 526, "enable_extend": 195, "enable_full_rang": [418, 433, 439, 462, 477, 547, 549], "enable_minmax_tun": [418, 439, 477], "enable_mse_search": [31, 549], "enable_quanted_input": [418, 439, 477], "encapsul": [95, 198, 199, 235, 262, 360, 449, 540], "encod": [140, 230, 396, 452, 553], "encodejp": 553, "encount": [523, 529], "end": [133, 180, 181, 182, 187, 195, 209, 224, 225, 263, 265, 385, 489, 495, 496, 497, 525, 530, 534, 538, 542, 544, 549, 553, 554], "end_epoch": 538, "end_posit": 225, "end_step": [180, 195, 538, 544], "endlessli": 554, "energi": [473, 541], "engin": 474, "english": 544, "enhanc": [484, 521, 544, 545, 548], "enough": [156, 413, 481, 488, 489, 546], "enough_memo_store_scal": 413, "ensp": 553, "ensur": [1, 225, 266, 476, 479, 526, 546, 553], "ensure_list": 1, "entir": [262, 449, 475, 477, 480, 492, 496, 497, 521, 544], "entranc": [64, 201, 
336], "entri": [52, 53, 55, 135, 165, 301, 302, 305, 324, 325, 327, 437, 438, 441, 442, 466, 478, 522, 535], "entropi": [195, 521, 554], "enum": 1, "enumer": [156, 157, 449, 473, 495, 525, 526, 538, 541, 543, 544], "env": [151, 529], "env_var": 151, "environ": [30, 151, 443, 447, 474, 478, 484, 489, 490, 494, 520, 554], "eoferror": [138, 397], "ep": [3, 150, 539], "epoch": [162, 181, 182, 187, 449, 523, 525, 526, 538, 543, 544], "equal": [90, 128, 133, 150, 281, 380, 391, 448, 466, 473, 541, 544, 549], "equal_dict": 466, "equat": [30, 232, 234, 488, 546, 552], "equival": [68, 340, 432, 473, 475, 477, 480, 488, 494, 541, 545, 549, 552], "erf": [54, 326], "error": [31, 90, 135, 140, 234, 271, 396, 413, 452, 463, 472, 477, 488, 529, 537, 549, 552, 554], "error_msg": 90, "especi": 531, "essenti": [521, 529], "estim": [133, 234, 243, 390, 554], "estimator_sess": [243, 390], "et": [135, 473, 477, 488, 521, 541, 549, 552], "etc": [151, 162, 224, 232, 235, 262, 392, 465, 494, 533, 544, 551, 554], "ethnic": 490, "euclidean": 466, "eural_compressor": 245, "eval": [492, 522, 523, 546], "eval_acc": 153, "eval_acc_fn": [474, 479, 481], "eval_arg": [153, 302, 438, 474, 478, 479, 481], "eval_dataload": [151, 195, 198, 199, 235, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 449, 520, 532, 537, 538, 546, 547, 554], "eval_fn": [153, 302, 438, 474, 478, 479, 480, 481, 482], "eval_fn_wrapp": 480, "eval_frequ": 162, "eval_func": [125, 198, 199, 235, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 281, 283, 449, 492, 523, 525, 526, 538, 546, 547, 548, 549, 554], "eval_metr": [198, 199, 235, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 449, 537, 546], "eval_perf": 153, "eval_result": 522, "eval_result_of_q_model": 153, "evalu": [151, 153, 162, 175, 198, 199, 209, 227, 230, 231, 232, 234, 235, 245, 262, 438, 449, 455, 478, 480, 481, 482, 488, 495, 496, 522, 525, 532, 533, 536, 537, 538, 539, 542, 544, 546, 551, 552, 554], "evaluate_squad": 233, "evaluation_result": 526, "evaluation_time_cost": 526, "evaluationfuncwrapp": 153, "even": [488, 547, 549, 552], "evenli": 523, "event": 490, "everi": [52, 53, 55, 175, 178, 180, 195, 324, 325, 327, 477, 495, 523, 547, 549, 554], "everyon": 490, "everyth": 192, "exact": [231, 418], "exact_match_scor": 231, "exactli": 526, "exampl": [1, 31, 135, 140, 145, 151, 152, 153, 156, 160, 173, 174, 188, 195, 209, 210, 211, 221, 225, 230, 231, 232, 234, 235, 245, 262, 280, 281, 391, 396, 408, 411, 426, 431, 433, 435, 443, 448, 449, 465, 466, 470, 475, 481, 482, 484, 490, 494, 496, 497, 501, 531, 532, 533, 548, 550, 553, 554, 556], "example_algo": [391, 448], "example_gener": 281, "example_index": 225, "example_inp": 145, "example_input": [145, 195, 408, 411, 412, 413, 417, 432, 433, 435, 438, 442, 459, 465, 471, 475, 476, 477, 478, 528], "examplealgorithm": 152, "examplealgorithmconfig": 152, "exampleclass": 281, "exce": [225, 549, 553], "exceed": [243, 390], "excel": [488, 549], "except": [140, 281, 396, 413, 466, 477, 522, 546, 547], "exchang": 528, "exclud": [195, 472, 495, 521, 548], "excluded_op_nam": [56, 57, 195, 328, 329, 538, 544], "excluded_precis": [195, 439, 548], "execut": [29, 30, 39, 140, 151, 157, 180, 181, 182, 195, 261, 262, 263, 265, 311, 392, 396, 412, 420, 448, 449, 455, 465, 466, 476, 477, 488, 489, 525, 538, 539, 543, 544, 546, 549, 551, 554, 555], "executionprovid": 555, "exemplifi": 544, "exhaust": [195, 270, 413], "exhaustivetunestrategi": 268, "exist": [39, 151, 243, 311, 390, 391, 446, 466, 474, 478, 489, 528, 537, 
554], "exit": [195, 263, 265, 482, 538], "exit_polici": 538, "exp": [195, 473, 538, 541, 544], "expand_and_reshap": 404, "expanddim": [45, 317], "expanddims_optim": [61, 333], "expanddimsoptim": [45, 317], "expect": [211, 477, 479, 488, 490, 491, 529, 536, 546, 549, 554], "expens": [538, 554], "experi": [481, 489, 490, 551, 552, 554], "experiment": [489, 525, 526, 528, 538, 544, 546, 550, 553], "explain": [156, 488, 497, 552], "explicit": 490, "explicitli": [156, 195, 476, 484, 489, 532, 544], "explor": [473, 495, 541], "explos": [473, 541], "expon": [473, 541], "exporsingleimagedetectionboxestococo": 230, "export": [86, 195, 230, 436, 460, 470, 471, 474, 476, 478, 489, 529], "export_compressed_model": [462, 549], "export_format": [418, 439], "export_model_for_pt2e_qu": 435, "exportconfig": 195, "exportdetectionstococo": 230, "exported_model": [471, 476], "exportgroundtruthtococo": 230, "exportsingleimagedetectionboxestococo": 230, "exportsingleimagedetectionmaskstococo": 230, "exportsingleimagedetectionstococo": 230, "exportsingleimagegroundtruthtococo": 230, "expos": 452, "express": [227, 490], "extend": [478, 489, 496, 497, 531], "extend_engin": [95, 360], "extens": [138, 140, 391, 396, 397, 466, 474, 475, 476, 478, 488, 489, 494, 495, 502, 522, 529, 530, 531, 533, 534, 536, 538, 539, 540, 544, 545, 546, 549, 552, 554], "extra": [140, 232, 396, 477, 538, 549], "extra_opset": 87, "extract": [52, 53, 55, 133, 173, 211, 280, 324, 325, 327], "extract_data_typ": 280, "extran": 522, "extrem": 521, "f": [140, 170, 396, 477, 488, 522, 526, 546, 549, 552], "f1": [231, 233, 234, 526, 537, 538, 551, 555], "f1_score": [231, 232], "face": [141, 398, 448, 489, 490, 494, 501, 545, 550, 555], "facebook": [475, 536, 552], "facil": [140, 396], "facilit": 522, "fact": [488, 546], "factor": [3, 126, 286, 413, 425, 471, 475, 477, 481, 488, 549, 552, 555], "factori": [241, 391], "fail": [140, 396, 492, 523, 528], "failur": [491, 492], "fair": 490, "faith": 490, "fake": [29, 31, 98, 142, 149, 156, 280, 413, 429, 433, 477, 488, 538, 546, 549, 552], "fake_qu": [33, 73, 92, 116, 121, 306, 345, 357, 371, 376, 439], "fake_quant": 99, "fakeaffinetensorquantfunct": [142, 429], "fakealgoconfig": 156, "fakequ": [73, 288, 345], "fakequant": 98, "fakequantizebas": 98, "falcon": [475, 494, 536, 544, 552], "fall": [140, 145, 396, 448, 473, 541], "fallback": [145, 195, 269, 271, 272, 474, 475, 476, 528, 539, 548, 554], "fallback_list": 28, "fallback_ord": 145, "fallbacktuningsampl": 277, "fals": [1, 2, 28, 29, 30, 31, 32, 33, 34, 56, 57, 74, 77, 84, 87, 88, 90, 98, 116, 121, 128, 131, 132, 133, 139, 140, 144, 145, 151, 163, 166, 195, 200, 202, 204, 209, 211, 221, 225, 230, 234, 245, 262, 281, 289, 292, 293, 294, 297, 298, 303, 306, 328, 329, 346, 348, 371, 376, 380, 383, 384, 385, 387, 396, 403, 404, 406, 409, 413, 417, 418, 420, 425, 427, 429, 433, 439, 442, 448, 452, 455, 462, 465, 466, 475, 477, 479, 481, 482, 483, 489, 495, 496, 523, 530, 537, 538, 546, 549, 552, 553, 554], "familiar": 470, "famou": [477, 488, 549], "faq": [490, 494], "far": 478, "fashionmnist": 211, "fast": [187, 195, 477, 481, 544, 545, 549], "fast_bias_correct": [148, 195, 546], "fastbiascorrect": [147, 150], "faster": [523, 524, 528, 545, 554, 555], "fatal": 463, "father": [141, 173, 398], "fault": 491, "fault_tolerant_fil": 466, "fbgemm": [474, 539, 546], "fc": [145, 195, 544], "fc1": [145, 433, 476], "fc2": [31, 145, 433], "fcn": 555, "feasibl": 195, "featur": [195, 209, 210, 221, 225, 413, 474, 479, 488, 491, 494, 520, 523, 524, 538, 539, 540, 
544, 545, 550, 553], "feb": 545, "fed": [413, 554], "feed": [133, 208, 385, 544], "feed_dict": [133, 208, 385], "feedward": 184, "fefin": 199, "feng": 535, "ferplu": 555, "fetch": [46, 145, 148, 154, 203, 318, 387, 496], "fetch_modul": [145, 433, 448], "fetch_weight_from_reshap": [61, 333], "fetchweightfromreshapeoptim": [46, 318], "few": [544, 545, 552, 554], "ffffff": 554, "ffn": [143, 184, 417], "ffn2_sparsiti": [171, 544], "ffn_modul": 184, "ffn_name": 184, "field": [195, 211, 230, 473, 492, 495, 497, 526, 538, 541, 551, 554], "field_nam": [161, 466], "fig": 472, "figur": [477, 547], "file": [90, 133, 138, 140, 141, 145, 148, 160, 166, 188, 192, 195, 198, 199, 209, 210, 211, 214, 224, 225, 230, 235, 245, 249, 250, 253, 262, 385, 391, 396, 397, 398, 411, 413, 417, 465, 466, 472, 477, 483, 491, 495, 496, 497, 526, 529, 532, 535, 537, 538, 540, 542, 546, 549, 553], "file_lik": [140, 396], "file_typ": 466, "filenam": [211, 466], "filepath": [140, 396, 466, 522], "fill": [551, 553, 554], "filter": [1, 209, 210, 211, 212, 213, 214, 216, 220, 292, 298, 387, 406, 427, 466, 497, 533, 544], "filter_fn": 427, "filter_registri": 218, "filter_typ": 218, "final": [140, 173, 180, 192, 396, 476, 488, 496, 538, 542, 544, 546, 550, 554], "finalize_calibr": 442, "find": [30, 52, 53, 55, 90, 133, 194, 234, 266, 324, 325, 327, 385, 466, 482, 495, 536, 544, 549, 551, 552, 554], "find_by_nam": 30, "find_lay": [194, 420], "find_layers_nam": 420, "find_opset": 90, "find_spec": 448, "fine": [186, 209, 478, 522, 544, 545, 546], "finer": [488, 552, 554], "finest": [488, 552], "finetun": [477, 488, 544, 549, 555], "finish": 484, "first": [128, 140, 145, 150, 152, 174, 195, 209, 230, 267, 278, 380, 391, 396, 413, 433, 466, 473, 474, 477, 483, 488, 489, 492, 494, 495, 496, 497, 534, 539, 541, 546, 548, 549, 551, 552, 554], "first_conv_or_matmul_quant": [195, 546], "first_n": [131, 383], "fit": [151, 175, 195, 235, 262, 266, 281, 449, 492, 520, 523, 526, 532, 537, 538, 539, 540, 544, 546, 547, 548, 549], "fit_with_raw_cmd": 151, "fix": [133, 185, 187, 209, 385, 477, 523, 544, 546, 549, 554], "fix_ref_type_of_graph_def": [133, 385], "flag": [195, 228], "flan": 544, "flatten_static_graph": 173, "flex": [494, 534], "flexibl": [474, 477, 481, 494, 544, 549], "flip": [221, 225, 553], "float": [3, 30, 31, 125, 126, 135, 152, 175, 180, 189, 192, 195, 209, 221, 228, 230, 231, 232, 277, 281, 283, 286, 303, 392, 413, 418, 433, 439, 442, 443, 452, 466, 471, 472, 473, 474, 475, 476, 477, 479, 480, 482, 488, 522, 537, 541, 546, 552, 553], "float16": [5, 8, 30, 401, 406, 462, 472, 477, 489, 549], "float16activationoper": 5, "float16binaryoper": 8, "float32": [212, 213, 225, 230, 387, 429, 448, 462, 472, 477, 488, 495, 520, 538, 546, 549, 552, 553], "float_dict": 135, "float_model": [135, 477, 484], "float_to_bfloat16": 30, "float_to_float16": 30, "floatfunct": 492, "floor": [473, 541], "flop": [477, 488, 549], "flow": [231, 232, 488, 492, 538], "flowchart": 554, "fn": 406, "fn_arg": 406, "focu": [489, 521, 554], "focus": [473, 477, 481, 490, 495, 531, 541], "fold": [47, 48, 145, 303, 319, 320, 413, 432, 433, 439, 459, 475, 477, 530, 549, 552], "fold_batch_norm": [61, 333], "fold_const": [61, 333], "foldbatchnormnodesoptim": [47, 319], "folder": [133, 211, 216, 235, 262, 385, 465, 477, 478, 489, 549], "follow": [73, 95, 153, 174, 211, 227, 230, 345, 360, 392, 420, 473, 474, 475, 480, 483, 484, 488, 489, 490, 491, 492, 494, 495, 496, 497, 522, 523, 525, 526, 529, 530, 534, 535, 537, 538, 539, 540, 541, 543, 544, 546, 549, 552, 554], 
"footprint": [195, 245, 525, 538, 542, 544, 547], "forc": [443, 548], "forg": 529, "fork": 491, "form": 232, "format": [5, 30, 39, 133, 145, 154, 195, 209, 210, 211, 230, 278, 281, 288, 289, 311, 385, 389, 390, 404, 431, 441, 445, 448, 459, 462, 473, 474, 476, 478, 481, 483, 488, 494, 496, 497, 522, 526, 528, 537, 538, 539, 540, 541, 545, 546, 549], "format_list2str": 154, "format_vers": [138, 397], "formul": 538, "formula": [488, 544, 552], "forpytorch": 534, "fortensorflow": 534, "fortieth": 544, "forward": [1, 145, 192, 413, 433, 477, 488, 496, 538, 544, 546, 549, 552], "forward_wrapp": [145, 413, 433], "foster": 490, "found": [162, 406, 413, 448, 474, 494, 522, 536, 539, 552, 554, 555], "foundat": 545, "four": [471, 476, 482], "fp1": [488, 552], "fp16": [31, 195, 278, 399, 401, 406, 418, 439, 488, 546], "fp2": [488, 552], "fp32": [28, 29, 31, 64, 116, 121, 128, 145, 150, 195, 198, 199, 234, 267, 271, 278, 301, 305, 336, 371, 376, 380, 391, 406, 411, 412, 415, 431, 433, 437, 455, 458, 459, 462, 465, 466, 472, 474, 475, 476, 477, 478, 479, 481, 488, 491, 495, 497, 530, 536, 537, 538, 539, 546, 548, 549, 550, 551, 552, 554, 555], "fp32_baselin": [153, 455, 495], "fp32_graph": [128, 380], "fp32_layer": [292, 293, 294, 297, 298], "fp32_model": [135, 145, 431, 441, 459, 466, 475, 476, 480, 481, 547, 549], "fp32_model_path": 547, "fp32_onnx_config": 528, "fp32_onnx_path": 459, "fp32_op": [33, 35, 92, 306, 307, 357], "fp32_tensor": 466, "fp4": [433, 473, 477, 541, 549], "fp6": [473, 541], "fp8": [437, 439, 441, 473, 478, 541, 545], "fp8_config": [439, 472, 494], "fp8_entri": 437, "fp8_white_list": [439, 472], "fp8config": [437, 439, 472, 494], "fpath": 211, "frac": [473, 488, 541, 552], "fraction": [221, 553], "fragment": 495, "framework": [1, 151, 152, 157, 163, 165, 170, 176, 180, 183, 187, 188, 196, 197, 201, 202, 205, 208, 209, 210, 211, 212, 213, 214, 215, 216, 218, 220, 222, 225, 234, 235, 236, 237, 239, 272, 278, 288, 299, 389, 455, 470, 474, 476, 481, 482, 488, 495, 497, 521, 522, 526, 527, 531, 532, 533, 537, 538, 539, 544, 545, 546, 553, 554, 555], "framework_dataset": 211, "framework_nam": [152, 522], "framework_specific_info": [32, 288, 289, 495], "frantar": [477, 488, 544, 549], "free": [29, 149, 187, 198, 199, 262, 413, 475, 480, 488, 490, 542, 551, 552], "freez": [73, 74, 75, 133, 345, 346, 385, 471, 476, 544], "freeze_fake_qu": [80, 351], "freeze_valu": [80, 351], "freeze_value_without_calib": 80, "freezefakequantopoptim": [73, 345], "freezevaluetransform": [74, 346], "freezevaluewithoutcalibtransform": 75, "frequenc": [162, 180, 195, 538, 544], "frequent": 534, "fresh": [494, 534], "friendli": [473, 488, 530, 538, 541, 545, 552], "from": [1, 30, 46, 52, 53, 55, 90, 133, 140, 141, 145, 151, 152, 153, 160, 162, 163, 169, 170, 176, 177, 178, 179, 183, 185, 186, 187, 188, 189, 190, 191, 192, 195, 198, 199, 200, 203, 206, 209, 210, 211, 212, 213, 214, 216, 221, 227, 230, 231, 232, 235, 243, 245, 262, 278, 280, 281, 288, 318, 324, 325, 327, 385, 387, 390, 392, 396, 398, 406, 408, 411, 412, 413, 415, 417, 431, 433, 437, 441, 448, 449, 452, 455, 457, 458, 459, 462, 465, 466, 470, 471, 472, 473, 474, 475, 476, 477, 479, 480, 481, 482, 483, 484, 488, 489, 490, 491, 492, 495, 497, 520, 522, 523, 525, 526, 527, 528, 529, 531, 533, 536, 537, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554], "from_dict": 479, "from_pretrain": [462, 489, 531, 538], "frontend": 489, "frozen": [235, 243, 262, 390, 540], "frozen_func": [133, 385], "frozen_pb_sess": [243, 390], 
"fuel": [473, 541], "full": [30, 209, 210, 211, 462, 466, 472, 477, 494, 521, 529, 535, 544, 549, 550], "full_rang": 433, "fulli": [479, 538, 546, 552], "fulltoken": 224, "fun": 281, "func": [133, 145, 161, 385, 466, 546], "func_dict": 455, "function": [124, 125, 138, 159, 181, 182, 187, 195, 198, 199, 210, 283, 296, 379, 393, 397, 421, 460, 470, 474, 476, 477, 478, 481, 488, 489, 491, 492, 495, 496, 497, 520, 523, 525, 528, 532, 533, 536, 537, 538, 539, 544, 546, 549, 551, 552, 554], "function1": 281, "function2": 281, "function3": 281, "fundament": [479, 480, 521, 538], "funnel": 555, "funsd": 555, "further": [55, 195, 327, 474, 488, 490, 494, 539, 540, 546], "fuse": [1, 43, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 76, 77, 78, 79, 81, 116, 121, 144, 145, 149, 315, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 347, 348, 349, 350, 352, 371, 376, 417, 492, 552], "fuse_biasadd_add": [61, 333], "fuse_column_wise_mul": [61, 333], "fuse_conv_redundant_dequant": [80, 351], "fuse_conv_requant": [80, 351], "fuse_conv_with_math": [61, 333], "fuse_decomposed_bn": [61, 333], "fuse_decomposed_in": [61, 333], "fuse_gelu": [61, 333], "fuse_layer_norm": [61, 333], "fuse_matmul_redundant_dequant": [80, 351], "fuse_matmul_requant": [80, 351], "fuse_pad_with_conv": [61, 333], "fuse_pad_with_fp32_conv": [61, 333], "fuse_qdq_bn": [115, 370], "fuse_qdq_concatv2": [115, 370], "fuse_qdq_conv": [115, 370], "fuse_qdq_deconv": [115, 370], "fuse_qdq_in": [115, 370], "fuse_qdq_matmul": [115, 370], "fuse_qdq_pool": [115, 370], "fuse_reshape_transpos": [61, 333], "fusebiasaddandaddoptim": [49, 321], "fusecolumnwisemuloptim": [50, 322], "fuseconvredundantdequantizetransform": [76, 347], "fuseconvrequantizetransform": [77, 348], "fuseconvwithmathoptim": [51, 323], "fusedbatchnorm": [66, 338], "fusedbatchnormv2": [66, 338], "fusedbatchnormv3": [108, 118, 363, 373], "fusedbatcnormv3": [55, 327], "fusedecomposedbnoptim": [52, 324], "fusedecomposedinoptim": [53, 325], "fusedinstancenorm": [112, 367], "fusedmatmul": 18, "fusedmatmuloper": 18, "fusegeluoptim": [54, 326], "fuselayernormoptim": [55, 327], "fusematmulredundantdequantizetransform": [78, 349], "fusematmulrequantizedequantizenewapitransform": [79, 350], "fusematmulrequantizedequantizetransform": [79, 350], "fusematmulrequantizenewapitransform": [79, 350], "fusematmulrequantizetransform": [79, 350], "fusenodestartwithconcatv2": [109, 119, 364, 374], "fusenodestartwithconv2d": [110, 120, 365, 375], "fusenodestartwithdeconv2d": [111, 366], "fusenodestartwithfusedbatchnormv3": [108, 118, 363, 373], "fusenodestartwithfusedinstancenorm": [112, 367], "fusenodestartwithmatmul": [113, 122, 368, 377], "fusenodestartwithpool": [114, 123, 369, 378], "fusepadwithconv2doptim": [56, 328], "fusepadwithfp32conv2doptim": [57, 329], "fusetransposereshapeoptim": [58, 330], "fusion": [44, 46, 55, 63, 94, 111, 112, 113, 116, 117, 118, 122, 316, 318, 327, 335, 359, 366, 367, 368, 371, 372, 373, 377, 480, 492, 495, 530], "futur": [149, 153, 198, 199, 470, 478, 533, 536, 538], "fw": 496, "fwk": 239, "fwk_name": [152, 522], "fx": [145, 272, 406, 435, 441, 471, 474, 476, 533, 539, 545, 546, 548], "fx_model": 145, "fx_white_list": 145, "g": [90, 140, 154, 175, 195, 396, 477, 484, 488, 491, 527, 544, 549, 550, 552, 554], "g_idx": 429, "gain": [543, 545], "gan": 545, "gap": [477, 540], "gather": [1, 16, 195], "gatheroper": 13, "gaudi": [478, 494, 534, 545], "gaudi2": [472, 494, 534], "gaussian": [266, 554], "gavgpool": 16, "gb": 484, "gcc": 555, "gcp": 545, "gelu": [54, 326], "gemm": [16, 
195], "gemm_to_matmul": [195, 546], "gemmoper": 15, "gen": [474, 488, 536, 539, 545, 546, 548], "gen_bar_updat": 211, "gen_id": 489, "gen_text": 489, "gender": 490, "gener": [1, 33, 34, 72, 107, 133, 145, 151, 153, 154, 173, 198, 199, 200, 208, 211, 212, 213, 214, 225, 227, 235, 258, 262, 266, 281, 299, 303, 306, 344, 385, 387, 413, 417, 418, 420, 438, 439, 442, 449, 472, 473, 474, 475, 477, 478, 479, 484, 488, 489, 491, 492, 495, 497, 523, 537, 538, 539, 541, 542, 544, 545, 546, 548, 549, 552, 553, 554], "generaltopk": 234, "generate_activation_observ": [145, 417], "generate_feed_dict": [133, 385], "generate_ffn2_pruning_config": 171, "generate_kwarg": 489, "generate_mha_pruning_config": 171, "generate_prefix": [151, 154], "generate_xpu_qconfig": 417, "generategraphwithqdqpattern": [92, 357], "generator1": 281, "geomean": 533, "gestalt": 545, "get": [1, 29, 30, 31, 52, 53, 55, 89, 90, 133, 141, 145, 151, 154, 161, 165, 166, 169, 170, 171, 173, 176, 183, 189, 190, 191, 192, 195, 203, 225, 234, 243, 262, 271, 278, 280, 299, 302, 324, 325, 327, 385, 387, 390, 391, 398, 413, 417, 420, 433, 439, 448, 455, 459, 462, 466, 470, 475, 478, 483, 488, 496, 521, 529, 533, 534, 536, 538, 545, 549, 552, 554, 556], "get_absorb_lay": [145, 433], "get_acceler": 446, "get_activ": 166, "get_adaptor_nam": 280, "get_algorithm": 455, "get_all_config": 160, "get_all_config_set": [302, 438], "get_all_config_set_from_config_registri": [152, 522], "get_all_fp32_data": [391, 466], "get_all_registered_config": [299, 439], "get_architectur": 151, "get_attribut": 173, "get_blob_s": 31, "get_block_nam": 448, "get_block_prefix": [145, 433], "get_bounded_thread": 151, "get_children": [141, 398], "get_common_modul": 173, "get_const_dim_count": [52, 53, 324, 325], "get_core_id": 151, "get_criterion": 169, "get_dataload": 418, "get_default_autoround_config": 439, "get_default_awq_config": 439, "get_default_double_quant_config": 439, "get_default_dynamic_config": 439, "get_default_fp8_config": 439, "get_default_fp8_config_set": 439, "get_default_gptq_config": 439, "get_default_hqq_config": 439, "get_default_mixed_precision_config": 439, "get_default_mixed_precision_config_set": 439, "get_default_mx_config": 439, "get_default_rtn_config": [439, 484], "get_default_sq_config": [303, 439], "get_default_static_config": 439, "get_default_static_quant_config": [299, 303], "get_default_teq_config": 439, "get_depth": [145, 417], "get_dict_at_depth": [145, 417], "get_double_quant_config_dict": 448, "get_element_under_depth": [145, 417], "get_embedding_contigu": 145, "get_estimator_graph": 133, "get_example_input": 145, "get_fallback_ord": 145, "get_filter_fn": 406, "get_final_text": 225, "get_framework_nam": 1, "get_func_from_config": 455, "get_graph_def": [133, 385], "get_half_precision_node_set": 406, "get_hidden_st": 145, "get_index_from_strided_slice_of_shap": 90, "get_input_output_node_nam": [133, 385], "get_ipex_vers": 446, "get_lay": 192, "get_layer_names_in_block": 448, "get_linux_numa_info": 154, "get_max_supported_opset_vers": 89, "get_metr": 455, "get_model_devic": 448, "get_model_fwk_nam": 239, "get_model_info": 448, "get_model_input_shap": [133, 385], "get_model_typ": [243, 390], "get_modul": [141, 398, 413, 433, 448], "get_module_input_output": [145, 433], "get_mse_order_per_fp32": 145, "get_mse_order_per_int8": 145, "get_multimodal_block_nam": 448, "get_named_children": [141, 398], "get_node_map": 459, "get_node_original_nam": 30, "get_numa_nod": 154, "get_number_of_socket": 466, "get_op_list": 466, "get_op_type_by_nam": 
145, "get_par": [413, 433], "get_pattern": 176, "get_physical_id": 151, "get_postprocess": 455, "get_preprocess": 455, "get_processor_type_from_user_config": 448, "get_prun": 183, "get_quant": 448, "get_quant_dequant_output": 29, "get_quantizable_onnx_op": 459, "get_quantizable_ops_from_cfg": [145, 417], "get_quantizable_ops_recurs": [413, 417], "get_reg": 189, "get_reg_typ": 189, "get_reversed_numa_info": 154, "get_rtn_double_quant_config_set": 438, "get_schedul": 190, "get_schema": 89, "get_siz": 466, "get_sparsity_ratio": 192, "get_sparsity_ratio_tf": 192, "get_subgraphs_from_onnx": 90, "get_super_module_by_nam": [141, 398], "get_tensor_by_nam": [133, 385], "get_tensor_histogram": [391, 466], "get_tensor_val_from_graph_nod": 133, "get_tensorflow_node_attr": 90, "get_tensorflow_node_shape_attr": 90, "get_tensorflow_tensor_data": 90, "get_tensorflow_tensor_shap": 90, "get_tensors_info": 466, "get_tf_criterion": 191, "get_tf_model_typ": 390, "get_thread": 151, "get_threads_per_cor": 151, "get_torch_vers": [145, 446], "get_torchvision_map": 225, "get_tuning_histori": 466, "get_unquantized_node_set": 406, "get_weight_from_input_tensor": 133, "get_weight_scal": 31, "get_weights_detail": 466, "get_windows_numa_info": 154, "get_woq_tuning_config": [439, 482], "get_workspac": 161, "getdefaultencod": 452, "getenv": 522, "gholami": 544, "gigant": [475, 480, 552], "girl": 489, "git": [491, 529, 534], "github": [3, 135, 177, 178, 188, 209, 227, 228, 231, 232, 234, 262, 439, 477, 491, 494, 527, 534, 535, 538, 550], "give": [156, 496, 554], "given": [1, 3, 30, 52, 53, 55, 89, 90, 101, 133, 141, 145, 152, 194, 225, 230, 262, 324, 325, 327, 392, 398, 406, 409, 413, 427, 433, 442, 448, 452, 478, 482, 497, 544, 552, 553], "global": [100, 175, 195, 280, 389, 466, 477, 478, 479, 492, 538, 544, 554], "global_config": 192, "global_st": 466, "global_step": 180, "globalaveragepool": 14, "globalaveragepooloper": 14, "glorot_uniform": [292, 293, 294, 298], "glue": [227, 234, 537, 538], "gluon": [1, 235, 262, 540], "glx": 529, "gm": 406, "go": [530, 549, 553], "goal": [151, 235, 262, 482, 488, 496, 523, 543, 546, 554], "goe": 472, "good": [479, 490, 546, 554], "googl": [474, 494, 522, 539, 545], "googlenet": 555, "got": [133, 385, 496, 529], "gp": 266, "gpt": [475, 494, 531, 536, 544, 552], "gpt2": 555, "gptq": [31, 392, 428, 431, 437, 439, 441, 478, 484, 488, 489, 494, 522, 536, 547, 549], "gptq_arg": [477, 549], "gptq_config": 549, "gptq_config_path": 549, "gptq_entri": 437, "gptq_g128asym": 549, "gptq_g32asym": 549, "gptq_g32asym_disable_last_matmul": 549, "gptq_quantiz": 31, "gptq_related_block": 420, "gptqconfig": [437, 439, 452, 477, 482, 489], "gptquantiz": 420, "gpu": [74, 75, 82, 140, 195, 346, 353, 396, 413, 443, 448, 477, 481, 520, 533, 538, 539, 546, 547], "gracefulli": 490, "grad": [182, 187], "gradient": [169, 439, 477, 488, 494, 533, 538, 544, 545, 555], "gradient_accumulate_step": [418, 439, 477], "gradient_accumulation_step": 538, "gradientcriterion": 169, "gradual": [190, 544], "grain": [186, 478, 544, 545, 554], "gram": 228, "granular": [292, 293, 294, 297, 298, 409, 473, 488, 495, 496, 497, 530, 533, 541, 546, 552], "graph": [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 90, 91, 92, 93, 94, 95, 97, 98, 99, 102, 115, 117, 121, 124, 127, 128, 130, 131, 132, 133, 173, 208, 236, 243, 261, 306, 307, 308, 309, 310, 311, 312, 313, 
314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 362, 370, 372, 376, 379, 380, 382, 383, 384, 385, 390, 406, 441, 471, 476, 488, 492, 495, 526, 532, 540, 546, 547, 548], "graph_bas": [72, 344], "graph_convert": [96, 361], "graph_converter_without_calib": 96, "graph_cse_optim": [61, 333], "graph_def": [39, 125, 126, 133, 235, 243, 261, 262, 283, 286, 311, 385, 390, 458], "graph_def_sess": [243, 390], "graph_modul": 435, "graph_nam": 87, "graph_node_name_map": 133, "graph_optimization_level": [195, 546], "graph_output": 90, "graph_rewrit": [96, 361], "graph_sess": [243, 390], "graph_transform_bas": [130, 382], "graph_util": [96, 361], "graphanalyz": [95, 360], "graphconvert": [33, 306], "graphconverterwithoutcalib": 34, "graphcseoptim": [59, 331], "graphdef": [59, 133, 243, 261, 331, 385, 390, 540], "graphfoldconstantoptim": [48, 320], "graphmodel": 406, "graphmodul": [145, 406, 435, 492, 548], "graphrewriterbas": [71, 343], "graphrewriterhelp": [95, 360], "graphtrac": [413, 433], "graphtransform": [129, 381], "graphtransformbas": [129, 381], "grappler": [60, 332, 530], "grappler_optim": 530, "grappler_pass": [61, 333], "grappleroptim": [60, 332], "greater": [133, 391, 466, 481, 544, 554], "greatest": 554, "greatli": [477, 547, 549], "grei": [477, 547], "grid": [195, 544], "ground": [231, 232], "ground_truth": [231, 232], "groundtruth": [195, 230], "groundtruth_box": 230, "groundtruth_boxes_list": 230, "groundtruth_class": 230, "groundtruth_classes_list": 230, "groundtruth_dict": 230, "groundtruth_is_crowd": 230, "groundtruth_mask": 230, "group": [31, 189, 292, 426, 433, 477, 488, 494, 533, 544, 549, 553, 554, 555], "group_dim": [439, 477, 549], "group_norm": 528, "group_siz": [31, 142, 145, 280, 426, 429, 433, 439, 452, 477, 482, 549], "grouplasso": 189, "groupnorm": 552, "grow": [477, 488, 549], "grown": 544, "growth": [473, 474, 538, 539, 541, 544], "gt": [475, 477, 552, 553], "guangxuan": [477, 488, 549, 552], "guarante": [195, 538], "guess_output_rank": 30, "gui": [533, 544], "guid": [209, 474, 481, 492, 494, 522, 534], "guidelin": [493, 494], "gz": 211, "h": [31, 195, 225, 526, 553], "h384": 555, "h5": 540, "ha": [52, 53, 55, 59, 140, 179, 180, 227, 281, 324, 325, 327, 331, 391, 396, 404, 420, 443, 474, 479, 481, 488, 491, 495, 496, 497, 523, 528, 533, 538, 539, 544, 546, 548, 551, 552, 554], "habana": [448, 472, 494, 534], "habana_visible_devic": 494, "habanalab": 494, "hack": 227, "haihao": [535, 544], "half": [399, 401, 406, 439, 474, 477, 539], "half_away_from_zero": [292, 293, 294, 297, 298], "half_precision_convert": 400, "half_precision_rewrit": 407, "halfprecisionconvert": 399, "halfprecisionmodulewrapp": 401, "hand": [478, 481], "handl": [46, 133, 149, 159, 318, 385, 393, 413, 423, 463, 492, 493, 522, 523, 552], "handler": [398, 452, 466, 554], "hanj": 3, "hanwen": 535, "harass": 490, "hard": [195, 523], "hardswish": 528, "hardtanh": 552, "hardwar": [161, 448, 473, 476, 484, 494, 495, 533, 538, 541, 544, 545], "harm": 490, "harmon": [232, 234], "has_zp": 31, "hasattr": 489, "hassoun": 544, "have": [3, 59, 68, 126, 133, 140, 170, 174, 176, 183, 207, 227, 230, 234, 262, 280, 281, 286, 331, 340, 385, 396, 412, 413, 417, 448, 473, 474, 475, 477, 482, 488, 489, 490, 491, 496, 497, 522, 523, 526, 530, 535, 537, 538, 539, 541, 542, 544, 546, 547, 549, 552, 554], "haven": 544, "hawq": [135, 269, 554], 
"hawq_metr": 136, "hawq_top": 135, "hawq_v2": [195, 270], "hawq_v2_loss": 554, "hawq_v2tunestrategi": 269, "hbm": 534, "he": 495, "head": [171, 173, 177, 184, 192, 477, 544, 555], "head_mask": 184, "header": [161, 466, 529], "heavi": [477, 525], "height": [179, 221, 225, 526, 553], "helloworld": [539, 553], "help": [145, 166, 433, 470, 482, 488, 522, 536, 540, 549, 554], "helper": [30, 95, 101, 133, 145, 209, 210, 211, 224, 243, 360, 385, 390, 417, 457, 458, 459, 464], "here": [230, 281, 472, 475, 476, 477, 480, 481, 484, 488, 489, 496, 526, 527, 528, 530, 536, 537, 546, 550, 551, 552, 555], "herebi": 552, "herlper": [124, 379], "hesit": 544, "hessian": [31, 135, 269, 477, 549, 554], "hessian_trac": 135, "hessiantrac": 135, "heterogen": 545, "hf": [141, 398, 431, 441, 448, 484, 489, 536, 552], "hicham": 477, "hidden": [174, 179, 495], "high": [30, 212, 213, 387, 472, 481, 534, 545, 554], "higher": [152, 195, 198, 199, 235, 262, 443, 449, 472, 477, 484, 488, 492, 528, 537, 544, 546, 549, 551], "higher_is_bett": [195, 234, 262, 554], "highest": 554, "highli": [477, 545, 549], "highlight": 492, "hint": [466, 529], "histogram": [3, 391, 453, 466], "histogramcollector": 3, "histori": [195, 465, 466, 554], "history_cfg": 465, "hoc": 227, "hold": [230, 266], "holder": 216, "hook": [135, 162, 166, 184, 398, 455, 525, 538, 544, 552], "hope": 528, "horizont": [225, 553], "horovod": 526, "host": [494, 526], "hostconst": [82, 353], "hour": 554, "how": [31, 140, 162, 169, 175, 181, 182, 187, 195, 225, 396, 433, 448, 470, 474, 476, 477, 478, 479, 480, 481, 488, 489, 491, 492, 493, 495, 523, 525, 526, 532, 537, 538, 544, 545, 546, 552, 553, 554], "howev": [140, 396, 477, 484, 488, 544, 549, 552], "howpublish": 535, "hp_dtype": [439, 472], "hpex": 446, "hpo": [198, 544], "hpoconfig": 195, "hpu": [429, 441, 443, 448, 472, 478], "hpu_acceler": 443, "hpuweightonlylinear": 429, "hqq": [428, 439, 478, 494], "hqq_arg": 477, "hqq_blog": [439, 477], "hqq_entri": 437, "hqqconfig": [437, 439, 477], "hqqlinear": [423, 427], "hqqmodul": 422, "hqqmoduleconfig": 422, "hqqtensorhandl": 423, "hqquantiz": 427, "hqt": 472, "hqt_output": [439, 472], "hroughput": 483, "ht": 555, "html": [492, 494, 496, 521, 526, 533, 534, 546], "htmllabel": 554, "http": [3, 135, 169, 177, 178, 187, 188, 209, 211, 227, 228, 230, 231, 232, 234, 262, 420, 439, 477, 492, 494, 534, 535, 538, 544, 550], "hub": [141, 398, 431, 441, 448, 494, 527], "hue": 553, "hug": [141, 398, 448, 489, 494, 545], "huge": [525, 544, 547], "hugginfac": [431, 441], "huggingfac": [173, 184, 209, 431, 441, 462, 473, 494, 527, 541, 552, 555], "huggingface_model": 188, "human": [154, 554], "hvd": [234, 526], "hw": 472, "hw_aligned_single_scal": 472, "hybirdblock": [235, 262], "hybrid": [484, 489], "hybridblock": 540, "hyper": [167, 477], "hyperparamet": [195, 488, 551, 552, 554], "i": [1, 30, 31, 33, 34, 36, 39, 40, 45, 49, 55, 67, 70, 90, 117, 128, 133, 135, 138, 140, 144, 145, 150, 151, 153, 156, 161, 162, 165, 166, 169, 173, 174, 180, 185, 186, 187, 188, 189, 191, 192, 195, 198, 199, 200, 207, 208, 209, 210, 211, 212, 213, 216, 218, 221, 225, 227, 230, 232, 234, 235, 245, 249, 250, 253, 262, 267, 272, 280, 281, 306, 308, 311, 312, 317, 321, 327, 339, 342, 372, 380, 385, 387, 391, 396, 397, 406, 408, 409, 413, 417, 420, 426, 427, 431, 433, 435, 439, 441, 443, 446, 448, 449, 452, 462, 465, 466, 471, 472, 473, 474, 475, 476, 477, 478, 480, 481, 482, 483, 484, 488, 489, 490, 491, 492, 494, 495, 496, 497, 498, 500, 506, 509, 512, 520, 521, 522, 523, 525, 526, 528, 529, 
530, 533, 534, 535, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555], "ic": [534, 545], "id": [151, 224, 227, 229, 230, 231, 232, 537, 551, 554], "idea": [488, 494, 544, 552, 554], "ideal": 474, "ident": [59, 65, 174, 331, 337, 490], "identifi": [52, 53, 55, 140, 230, 243, 324, 325, 327, 390, 396, 482, 544, 549], "idx1": 211, "idx3": 211, "ieee": [135, 474, 488, 539, 552], "ignor": [466, 477, 488, 521, 549, 552, 554], "ignore_attr": 466, "ignore_kei": 466, "ii": 554, "illinoi": 3, "illustr": [477, 496, 497, 525, 554], "imag": [210, 211, 214, 216, 221, 225, 230, 474, 488, 526, 528, 544, 552, 553, 555], "image_format": 216, "image_height": 230, "image_id": [230, 537], "image_list": 214, "image_tensor": 538, "image_width": 230, "imageclassifi": 211, "imagefold": [211, 526, 538], "imagenet": [211, 214, 221, 479, 538, 544, 553, 555], "imagenet_dataset": 215, "imagenet_transform": 222, "imagenetraw": 214, "imagerecord": 526, "imageri": 490, "img": 494, "img1": 214, "img2": 214, "img_dir": 210, "imgx": 214, "iml": 545, "immedi": 544, "impact": [269, 544, 550, 554], "imper": 492, "implement": [95, 128, 132, 140, 198, 199, 204, 207, 208, 209, 211, 227, 235, 245, 262, 269, 360, 380, 384, 392, 396, 449, 477, 489, 497, 523, 537, 538, 544, 546, 549, 550, 554], "implicitli": [140, 156, 396], "import": [133, 151, 153, 161, 195, 235, 245, 262, 281, 385, 431, 441, 446, 449, 466, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 488, 489, 492, 494, 497, 520, 523, 525, 526, 528, 531, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 550, 551, 552, 553, 554], "import_util": 448, "importerror": [138, 397, 529], "importlib": 448, "impract": 477, "improv": [186, 474, 476, 477, 488, 489, 491, 494, 521, 522, 538, 544, 545, 546, 548, 549, 554, 555], "in_featur": [403, 423, 429], "in_graph": [133, 385], "in_graph_is_binari": [133, 385], "in_mp": 30, "inappropri": 490, "inc": [225, 301, 305, 429, 431, 441, 481, 482, 490, 496, 536, 545, 550, 554], "inc_model": [528, 540], "inc_target_devic": [443, 478, 489], "incbench": 483, "incept": 555, "incid": 490, "incit": [475, 552], "includ": [138, 163, 169, 173, 175, 189, 191, 195, 209, 211, 218, 225, 281, 392, 397, 418, 431, 437, 441, 448, 466, 472, 477, 478, 481, 482, 484, 489, 490, 495, 496, 497, 523, 534, 535, 536, 537, 538, 544, 546, 548, 549, 554], "include_lay": 453, "include_nod": 1, "include_tensors_kl": 1, "include_tensors_minmax": 1, "inclus": 490, "incompat": 529, "incorpor": [477, 496, 497, 525, 544, 554], "incorrect": [140, 396], "incquantizationconfigmixin": 452, "increas": [195, 473, 477, 524, 533, 541, 549, 554], "increasingli": 544, "increment": 554, "incub": 3, "incur": [473, 541], "incweightonlylinear": 429, "independ": [184, 257, 261, 497], "index": [52, 53, 55, 90, 151, 192, 195, 203, 207, 211, 324, 325, 327, 387, 448, 466, 488, 494, 534, 537, 549, 552], "indexdataset": 207, "indexerror": [138, 397], "indexfetch": [203, 387], "indic": [140, 152, 162, 195, 203, 207, 209, 230, 387, 396, 409, 426, 443, 481, 492, 523, 529], "individu": [126, 175, 257, 261, 286, 488, 490, 544, 552], "industri": [537, 545], "infer": [1, 30, 90, 125, 145, 173, 243, 245, 262, 283, 301, 305, 390, 417, 433, 449, 474, 475, 476, 477, 480, 481, 488, 489, 494, 495, 496, 521, 525, 531, 538, 539, 542, 544, 545, 546, 549, 552, 554], "infer_onnx_shape_dtyp": 90, "infer_shap": 30, "inferenc": [473, 541], "influenc": [145, 544], "info": [30, 145, 154, 161, 177, 178, 192, 223, 239, 391, 412, 413, 417, 442, 448, 462, 463, 
466, 492, 522, 540], "inform": [1, 135, 154, 169, 170, 173, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 195, 198, 199, 234, 389, 426, 466, 472, 473, 475, 477, 481, 483, 484, 490, 491, 493, 494, 495, 496, 497, 498, 500, 506, 509, 512, 521, 522, 530, 538, 540, 541, 544, 548, 549, 550, 552, 555, 556], "infrastructur": 524, "ingest": 230, "inherit": [162, 163, 185, 186, 190, 206, 225, 392, 495, 496], "init": [0, 4, 29, 148, 164, 170, 172, 193, 537, 554], "init_alpha": [413, 439], "init_quantize_config": 101, "init_tun": 153, "initi": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 21, 22, 23, 25, 26, 29, 30, 31, 90, 101, 133, 140, 153, 163, 180, 198, 199, 206, 209, 262, 278, 282, 285, 287, 291, 292, 293, 294, 295, 296, 297, 298, 300, 385, 396, 400, 409, 448, 449, 478, 481, 488, 497, 537, 544, 546, 552, 554], "initial_op_tuning_cfg": 277, "initial_tuning_cfg_with_quant_mod": 278, "initialize_int8_avgpool": 297, "initialize_int8_conv2d": 292, "initialize_int8_dens": 293, "initialize_int8_depthwise_conv2d": 294, "initialize_int8_maxpool": 297, "initialize_int8_separable_conv2d": 298, "initialize_name_count": 90, "inject": [44, 316, 538], "injectdummybiasaddoptim": [44, 316], "inlin": [133, 385], "innov": 545, "inplac": [29, 412, 413, 442, 478, 489], "input": [1, 2, 29, 30, 31, 39, 45, 49, 52, 53, 55, 56, 57, 59, 67, 68, 70, 71, 90, 94, 95, 101, 107, 126, 133, 134, 142, 144, 145, 149, 154, 173, 174, 192, 195, 198, 199, 209, 210, 211, 213, 221, 224, 225, 234, 235, 239, 243, 262, 286, 311, 317, 321, 324, 325, 327, 328, 329, 331, 339, 340, 342, 343, 359, 360, 385, 387, 390, 391, 408, 411, 413, 417, 425, 429, 433, 435, 442, 448, 449, 457, 458, 459, 462, 465, 466, 471, 476, 477, 478, 488, 495, 496, 520, 521, 523, 525, 526, 528, 530, 532, 537, 538, 539, 540, 544, 546, 549, 552, 553], "input2tupl": 145, "input_data": [29, 523], "input_desc": 1, "input_dtyp": 90, "input_fil": 225, "input_fn": [133, 243, 390], "input_func": [145, 433], "input_graph": [116, 121, 128, 258, 261, 371, 376, 380, 495], "input_graph_def": [52, 53, 55, 324, 325, 327], "input_id": [209, 225, 489, 538], "input_mask": [225, 538], "input_max": 413, "input_max_ab": 413, "input_min": 413, "input_minmax": 413, "input_model": 540, "input_model_tensor": 466, "input_nam": [52, 53, 87, 127, 195, 243, 324, 325, 390, 458, 459, 528], "input_name_to_nod": 457, "input_node_map": [52, 53, 324, 325], "input_node_nam": [69, 116, 121, 133, 341, 371, 376, 385], "input_output_nam": [60, 332], "input_pb": [129, 131, 132, 381, 383, 384], "input_scal": [142, 398, 413, 429], "input_shap": [90, 213, 387], "input_tensor": [133, 243, 385, 390], "input_tensor_data": 467, "input_tensor_ids_op_nam": [145, 417], "input_tensor_nam": [133, 243, 385, 390], "input_valu": [145, 433], "inputbatch": 225, "inputcapturemodul": 413, "inputfeatur": [209, 225], "inputs_as_nchw": [127, 458], "insecur": [140, 396], "insensit": 443, "insert": [62, 90, 92, 94, 98, 131, 149, 288, 334, 357, 359, 383, 413, 433, 442, 476, 477, 478, 480, 488, 492, 496, 523, 533, 538, 544, 546, 548, 549, 552], "insert_log": [130, 382], "insert_newlin": 466, "insert_print_nod": [61, 333], "insert_qdq_pattern": [91, 356], "insertlog": [131, 383], "insertprintminmaxnod": [62, 334], "inset": 538, "insid": [230, 477, 525, 529, 544, 547, 554], "insight": [545, 550, 554], "inspect": 550, "inspect_tensor": 495, "inspect_typ": 495, "inspir": [477, 549], "instal": [391, 481, 489, 526, 529, 531, 556], "instanc": [100, 101, 151, 154, 195, 198, 199, 231, 232, 234, 235, 262, 
266, 280, 409, 449, 466, 477, 483, 490, 491, 492, 495, 520, 531, 538, 549, 554, 555], "instance_index": 154, "instance_norm": 528, "instancenorm": [53, 325, 552], "instances_val2017": 210, "instanti": 543, "instead": [195, 208, 466, 476, 544, 549], "institut": 211, "instruct": [474, 475, 488, 489, 496, 497, 534, 539, 544, 545, 546, 552], "insuffici": 483, "insult": 490, "int": [1, 3, 29, 30, 31, 90, 125, 133, 143, 145, 152, 156, 161, 171, 195, 208, 209, 210, 221, 225, 228, 230, 234, 251, 257, 261, 280, 281, 283, 284, 288, 301, 302, 305, 385, 404, 413, 417, 418, 423, 425, 426, 429, 433, 439, 444, 448, 452, 458, 459, 462, 466, 477, 481, 522, 523, 537, 538, 549, 553], "int32": [429, 462, 477, 549], "int4": [488, 494, 536, 546], "int8": [5, 6, 30, 31, 72, 108, 109, 110, 116, 118, 119, 120, 121, 128, 133, 150, 151, 195, 221, 278, 280, 292, 293, 294, 297, 298, 299, 303, 344, 363, 364, 365, 371, 373, 374, 375, 376, 380, 409, 433, 439, 441, 457, 458, 459, 465, 466, 472, 473, 475, 477, 478, 479, 481, 488, 491, 492, 495, 496, 497, 520, 525, 530, 536, 538, 539, 541, 545, 546, 548, 549, 550, 551, 552, 553], "int8_conv_config": 496, "int8_model": [458, 459], "int8_model_path": 547, "int8_node_name_revers": 133, "int8_onnx_config": [195, 528], "int8_sequ": [33, 306], "int_label": 537, "int_max": 30, "integ": [162, 179, 180, 195, 230, 234, 281, 448, 475, 477, 480, 488, 495, 497, 521, 546, 549, 552, 554], "integerop": 555, "integr": [133, 163, 385, 474, 478, 481, 488, 495, 538, 549], "intel": [154, 165, 177, 178, 188, 226, 233, 234, 246, 262, 270, 276, 290, 302, 303, 304, 305, 391, 392, 394, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 450, 452, 456, 468, 470, 472, 474, 475, 476, 477, 478, 479, 481, 482, 483, 485, 488, 490, 491, 493, 495, 496, 497, 521, 524, 525, 527, 528, 529, 530, 531, 535, 536, 539, 540, 542, 543, 544, 545, 546, 548, 549, 550, 552, 554, 555], "intel_extension_for_pytorch": [446, 476, 489], "intelcaff": 545, "intellig": 545, "intelon": 545, "inteltensorflow": [214, 216], "intend": 491, "inter": 195, "inter_area": 221, "inter_num_of_thread": [195, 257, 261, 538], "inter_pol": 221, "interact": [494, 551], "interest": [490, 494], "interfac": [71, 98, 154, 301, 343, 390, 392, 455, 474, 476, 478, 481, 488, 533, 538, 546, 550], "interleav": 210, "intermedi": [30, 135, 163, 195, 488, 492, 533, 546], "intermediatelayersknowledgedistillationloss": 163, "intermediatelayersknowledgedistillationlossconfig": 195, "intern": [135, 207, 278, 387, 472, 523, 537, 544], "internal_pattern": 278, "internet": 211, "interpol": [186, 225, 537, 544, 553], "intersect": [234, 406, 495, 537], "interv": [186, 544], "intra": 195, "intra_num_of_thread": [195, 257, 261, 538], "introduc": [59, 331, 470, 477, 488, 495, 496, 497, 530, 538, 546, 549, 550, 552], "introduct": 470, "intuit": [477, 488, 549, 552], "inturn": 537, "inuput": 30, "invalid": 413, "invalid_lay": 175, "invent": [488, 546], "invers": [477, 549], "investig": [490, 522], "invok": [230, 496], "involv": 476, "io": [1, 140, 396, 439, 477, 494, 534], "iou": 234, "iou_thr": [230, 234, 537], "iou_typ": 230, "ipc": 494, "ipex": [145, 195, 411, 412, 413, 417, 437, 441, 446, 465, 474, 475, 489, 533, 536, 538, 539, 552], "ipex_config": [145, 413], "ipex_config_path": [145, 413, 417], "ipexmodel": 244, "ir_vers": 29, "is_asymmetr": 496, "is_b_transpos": 30, "is_ckpt_format": [133, 385], "is_dynam": 409, "is_fused_modul": 145, "is_glob": 175, "is_hpex_avail": 446, "is_imposs": 225, "is_int8_model": 465, "is_ipex_avail": 446, "is_ipex_import": 446, 
"is_large_model": 30, "is_leaf": 420, "is_list_or_tupl": 90, "is_measur": 245, "is_model_quant": 1, "is_onnx_domain": 90, "is_optimum_avail": 448, "is_optimum_habana_avail": 448, "is_package_avail": 446, "is_perchannel": 496, "is_qat": 144, "is_saved_model_format": [133, 385], "is_subgraph": 87, "is_transformers_import": 446, "isa": 474, "isiter": 1, "isn": [52, 53, 55, 324, 325, 327], "issu": [413, 488, 490, 491, 493, 494, 534, 546], "item": [30, 192, 195, 224, 278, 391, 466, 488, 521, 526, 544, 552, 554], "item_list": 30, "item_typ": 278, "itemstyl": 554, "iter": [1, 2, 90, 125, 133, 145, 151, 174, 180, 187, 190, 195, 198, 199, 203, 207, 211, 212, 235, 262, 283, 301, 305, 385, 387, 413, 417, 418, 425, 433, 439, 449, 452, 455, 477, 481, 482, 492, 495, 520, 523, 538, 544, 546, 552, 554], "iter_bar": 538, "iter_op": [133, 385], "iterabledataset": [207, 211], "iterablefetch": [203, 387], "iterablesampl": [207, 387], "iteration_list": 495, "iterativeschedul": 190, "iterator_sess_run": [133, 385], "itex": [32, 94, 195, 289, 359, 458, 481, 539, 546, 552], "itex_instal": 391, "itex_mod": [32, 33, 74, 92, 116, 121, 289, 306, 346, 357, 371, 376], "itex_qdq_mod": [56, 57, 328, 329], "itrex": [195, 418, 439, 475, 552], "its": [30, 45, 81, 133, 135, 173, 182, 185, 187, 192, 195, 205, 211, 225, 228, 229, 266, 317, 352, 406, 408, 457, 474, 477, 482, 488, 490, 535, 537, 539, 544, 549, 551, 553, 554], "itself": 544, "j": [475, 488, 494, 536, 544, 552], "jan": 545, "jason": 552, "jbla": 31, "jeffrei": 521, "ji": [477, 488, 549], "jit": [173, 476, 552], "jitbasicsearch": 173, "jitter": 553, "john": [391, 466], "join": 494, "joint": 545, "jonathanhuang": 230, "journei": 545, "jpeg": 553, "jpg": [210, 214, 216, 494], "json": [145, 160, 210, 225, 411, 413, 417, 465, 477, 540, 549, 553], "json_file_path": [411, 465], "judg": 420, "juli": 545, "jun": 545, "june": [494, 545], "just": [82, 200, 223, 225, 245, 353, 387, 413, 476, 481, 488, 489, 529, 538, 543, 546, 552, 553, 554], "k": [234, 262, 537, 544], "k_block": 31, "kappa": 425, "keep": [140, 161, 184, 192, 267, 396, 492, 495, 522, 550], "keep_mask_lay": 175, "keepdim": [488, 552], "kei": [133, 135, 140, 141, 173, 184, 192, 195, 243, 281, 390, 391, 396, 398, 413, 433, 448, 453, 455, 466, 477, 488, 494, 496, 545, 549, 554], "kept": 179, "kera": [55, 101, 165, 173, 192, 195, 211, 218, 238, 243, 287, 290, 302, 305, 327, 390, 391, 479, 481, 496, 523, 526, 540], "keras_model": 237, "keras_sess": [243, 390], "kerasadaptor": 288, "kerasbasepattern": 175, "kerasbaseprun": 180, "kerasbasicprun": 181, "kerasconfigconvert": 288, "kerasmodel": [238, 390], "keraspatternnxm": 179, "kerasqueri": 288, "kerassurgeri": 288, "kernel": [31, 149, 195, 477, 495, 496, 554], "kernel_constraint": [292, 293], "kernel_initi": [292, 293], "kernel_regular": [292, 293], "kernel_s": [292, 294, 298], "keutzer": 544, "key_layer_nam": 184, "keynot": 545, "keyword": [140, 195, 396, 413, 431, 441], "kim": 544, "kind": [145, 538], "kit\u4e3aai\u5e94\u7528\u5e26\u6765\u9ad8\u6548\u5f02\u6784\u52a0\u901f\u670d\u52a1": 545, "kl": [1, 3, 195, 409, 413, 439, 453, 461, 496, 497, 521, 530, 538, 554], "kl_diverg": 460, "klcalibr": 3, "know": [522, 526, 550], "knowledg": [162, 163, 195, 480, 525, 527, 533, 538, 540], "knowledgedistillationframework": 163, "knowledgedistillationloss": [163, 538], "knowledgedistillationlossconfig": [195, 525, 538, 543], "known": [140, 266, 396, 472, 473, 538, 541, 544, 546, 554], "kriz": 211, "kullback": 497, "kwarg": [2, 30, 90, 107, 108, 109, 110, 111, 112, 113, 114, 
117, 118, 119, 120, 122, 141, 145, 171, 195, 211, 223, 225, 234, 235, 236, 238, 240, 242, 243, 244, 262, 277, 279, 281, 292, 293, 294, 297, 298, 363, 364, 365, 366, 367, 368, 369, 372, 373, 374, 375, 377, 390, 398, 399, 418, 420, 429, 431, 433, 437, 439, 441, 448, 449, 452, 462, 463, 465, 466, 523], "kwon": 544, "l": [477, 483, 521, 554], "l1": 195, "l12": 555, "l2": [195, 544], "l6": 555, "l954": 227, "l983": 227, "label": [195, 198, 199, 209, 211, 212, 213, 214, 217, 221, 225, 227, 229, 234, 235, 262, 387, 413, 449, 481, 492, 523, 537, 538, 546, 553], "label_fil": [209, 225, 553], "label_list": [209, 234], "label_map": 537, "label_shap": [213, 387], "label_shift": [221, 553], "labelbalancecocorawfilt": 217, "labelbalancecocorecordfilt": 217, "labelshift": [221, 553], "lack": [523, 529], "lake": [474, 534, 539, 545], "lambada": [475, 552], "lambada_openai": 536, "lambda": [140, 396, 547], "lamini": [475, 544, 552], "land": 545, "languag": [29, 149, 227, 413, 431, 441, 448, 472, 473, 475, 477, 478, 480, 481, 488, 489, 490, 528, 541, 545, 547, 549, 552], "laplacian": 477, "larei": 544, "larg": [29, 30, 149, 413, 472, 473, 475, 477, 478, 480, 481, 488, 489, 523, 538, 541, 545, 547, 549, 552, 555], "larger": [152, 443, 477, 488, 496, 544, 549, 552], "lasso": [189, 533, 544], "lassounbalanc": 555, "last": [145, 169, 192, 195, 413, 472, 474, 475, 477, 523, 539, 546, 549, 552], "last_batch": [200, 202, 204, 208, 387, 523], "last_conv_or_matmul_quant": [195, 546], "latenc": [551, 554], "latency_pattern": 483, "later": [140, 149, 396, 413, 443, 471, 522], "latest": [474, 478, 494, 534, 545, 550], "latin1": [140, 396], "launch": [483, 539], "launcher": 525, "layer": [32, 101, 102, 103, 104, 106, 107, 137, 139, 141, 149, 163, 166, 171, 173, 174, 175, 179, 184, 192, 194, 195, 288, 289, 291, 391, 395, 398, 413, 420, 429, 448, 453, 455, 466, 470, 475, 476, 478, 488, 495, 496, 497, 525, 529, 533, 544, 549, 550], "layer1": [173, 195, 538, 544, 546], "layer2": [173, 538, 544, 546], "layer3": [538, 544], "layer_1": 174, "layer_2": 174, "layer_idx": 192, "layer_initi": 295, "layer_input": 192, "layer_map": [163, 195], "layer_nam": [195, 413, 544], "layer_norm": 528, "layer_tensor": 453, "layer_wis": [394, 465, 547], "layer_wise_qu": [136, 195, 547], "layerhistogramcollector": 453, "layernorm": [55, 327, 488, 552], "layerwisequ": 139, "layout": [39, 311], "layoutlmv3": 555, "lazi": [161, 466], "lazyimport": [161, 466], "ld_library_path": 529, "lead": [195, 474, 477, 481, 488, 522, 538, 539, 544, 549, 552], "leadership": 490, "leaky_relu": 528, "leakyrelu": [40, 312, 552], "learn": [470, 473, 474, 477, 478, 481, 488, 494, 495, 496, 523, 528, 532, 533, 534, 538, 539, 541, 544, 545, 546, 552, 554], "learning_r": [195, 538], "least": [188, 491, 538, 544, 554], "leav": 101, "lee": 544, "left": [221, 225, 488, 552, 553], "legal": [494, 556], "leibler": 497, "len": [135, 195, 225, 526, 553], "length": [184, 195, 209, 225, 230, 418, 448, 477, 488, 537, 545, 546, 549, 553], "less": [40, 133, 145, 195, 312, 391, 433, 466, 520, 538, 544], "let": [497, 530, 544], "level": [31, 156, 173, 267, 281, 463, 472, 473, 488, 490, 541, 552, 554], "levelwis": 173, "leverag": [60, 332, 462, 471, 472, 476, 479, 482, 488, 489, 528, 543, 546, 549, 554], "lib": 529, "libgl": 529, "libgl1": 529, "libglib2": 529, "librari": [174, 226, 257, 448, 468, 474, 481, 494, 534, 538, 539, 545, 546], "licens": 491, "lie": 521, "light": 525, "lightn": 494, "lightweight": [484, 544], "like": [59, 81, 83, 133, 140, 156, 173, 192, 195, 198, 199, 200, 
211, 234, 243, 262, 331, 352, 354, 385, 387, 390, 396, 448, 449, 452, 474, 477, 481, 488, 491, 492, 494, 495, 496, 525, 533, 543, 544, 546, 549, 550, 552, 554], "limit": [138, 266, 397, 466, 473, 477, 478, 481, 494, 536, 541, 546, 549], "lin": [477, 488, 549], "line": [496, 522, 526, 533], "linear": [30, 142, 145, 149, 171, 173, 174, 179, 184, 192, 194, 195, 403, 413, 423, 427, 429, 433, 448, 472, 475, 476, 477, 488, 489, 492, 528, 538, 544, 549, 552, 554], "linear2linearsearch": 173, "linear_lay": 184, "linear_pattern": 174, "linearcompress": 174, "linearcompressioniter": 174, "linearli": 30, "link": [195, 209, 234, 262, 472, 478, 489, 521, 528, 549, 555], "linkedin": 545, "linux": [154, 483, 484, 489, 520, 529], "list": [1, 29, 30, 31, 39, 90, 125, 133, 135, 143, 145, 151, 152, 153, 154, 156, 173, 174, 179, 184, 188, 192, 194, 195, 198, 199, 203, 209, 221, 225, 227, 228, 230, 231, 232, 234, 235, 243, 249, 250, 253, 262, 277, 281, 283, 299, 302, 303, 305, 311, 385, 390, 398, 406, 413, 417, 418, 420, 433, 438, 439, 448, 449, 453, 458, 459, 466, 472, 475, 478, 480, 481, 492, 494, 495, 528, 530, 534, 536, 538, 542, 544, 546, 548, 552, 554], "liter": 281, "littl": 489, "llama": [475, 484, 489, 494, 536, 544, 545, 547, 549, 552], "llama2": 494, "llamanorm": 552, "llm": [125, 126, 283, 286, 420, 439, 472, 473, 475, 476, 477, 480, 488, 489, 531, 541, 544, 545, 547, 549, 552], "llm_weight_minmax": [92, 357], "lm": [477, 544, 555], "lm_head": [477, 544, 549], "lm_head_config": 477, "ln": 529, "lnl": 489, "load": [133, 138, 140, 141, 160, 209, 224, 225, 235, 243, 262, 385, 390, 395, 397, 398, 408, 411, 412, 413, 415, 417, 431, 441, 445, 448, 465, 466, 472, 481, 489, 496, 523, 529, 546, 547, 552], "load_and_cache_exampl": 209, "load_config_map": 160, "load_data_from_pkl": 466, "load_empty_model": [141, 398, 448, 477, 484, 547], "load_entri": 440, "load_huggingfac": [460, 538], "load_layer_wise_quantized_model": [141, 398], "load_modul": 398, "load_saved_model": [243, 390], "load_state_dict": [140, 396], "load_tensor": [141, 398], "load_tensor_from_shard": [141, 398], "load_valu": 398, "load_vocab": 224, "load_weight_onli": 465, "loadannot": 230, "loaded_model": [477, 489], "loader": [1, 125, 198, 199, 235, 262, 283, 301, 305, 431, 449, 462, 523, 546], "loadformat": [431, 445], "loc": [140, 396], "local": [175, 195, 431, 441, 466, 477, 479, 494, 529, 534, 544, 551], "local_config": [192, 195], "local_config_fil": [32, 288, 289], "locat": [140, 146, 192, 225, 396, 413, 476, 481, 526, 546, 550, 553], "lock": [185, 533, 543, 544], "log": [131, 151, 159, 161, 173, 249, 250, 253, 383, 393, 413, 463, 483, 492, 551, 554], "log2": [473, 541], "log_fil": [151, 256, 260], "log_interv": 526, "log_process": 161, "log_quantizable_layers_per_transform": 420, "logfile_dict": 154, "logger": [161, 453, 460, 466], "logic": [421, 425, 443, 478], "logical_cpu": 154, "login": 551, "loglevel": 554, "logo": 535, "long": [225, 477, 522, 529, 549, 553], "long_str": 522, "longer": [209, 225, 418, 484, 553], "longest": [225, 553], "look": [133, 184, 480, 495, 497, 530, 537, 552], "lookup": 173, "loop": [449, 492, 496, 497, 530, 551, 554], "loss": [29, 153, 163, 195, 234, 449, 474, 476, 477, 481, 488, 521, 525, 526, 527, 536, 537, 538, 539, 543, 544, 545, 546, 549, 552, 554], "loss_func": [170, 188], "loss_sum": 538, "loss_typ": [163, 195, 538], "loss_weight": [163, 195, 538], "lossi": [488, 546], "lot": [488, 492, 552], "low": [30, 198, 199, 212, 213, 235, 387, 472, 474, 477, 482, 488, 489, 495, 496, 520, 521, 526, 538, 539, 
545, 546, 549, 552, 554], "low_cpu_mem_usag": 418, "low_gpu_mem_usag": [418, 439, 477], "low_memory_usag": 195, "lower": [224, 225, 232, 267, 413, 471, 472, 473, 476, 481, 488, 525, 541, 544, 545, 546, 552, 553, 554], "lowerbitssampl": 277, "lowercas": 209, "lowest": [544, 554], "lp_norm": 425, "lpot": [545, 550], "lr": [195, 418, 439, 452, 477, 526, 538], "lr_schedul": [418, 439, 477, 538, 544], "lstm": 16, "lstmoper": 17, "lt": 555, "lvwerra": 555, "lwq": 477, "m": [30, 135, 177, 178, 195, 413, 433, 483, 491, 534, 544, 555], "machin": [154, 227, 477, 481, 484, 528, 534, 545, 552], "maco": 534, "made": [269, 488, 495, 546, 550, 554], "mae": [234, 537], "magnitud": [169, 191, 195, 234, 413, 533, 544], "magnitude_progress": 195, "magnitudecriterion": [169, 191], "mahonei": 544, "mai": [3, 133, 138, 140, 281, 385, 396, 397, 472, 474, 477, 478, 480, 488, 489, 490, 491, 494, 496, 521, 522, 528, 529, 535, 539, 545, 546, 549, 552, 554], "mail": 490, "main": [3, 165, 184, 188, 301, 302, 305, 420, 437, 438, 442, 443, 477, 478, 479, 481, 483, 484, 488, 492, 523, 526, 538, 544, 546, 549], "mainli": [162, 190, 488, 494, 531, 538, 544, 546], "mainstream": [470, 494], "maintain": [234, 476, 477, 481, 488, 490, 491, 522, 540, 544, 549, 550], "mainten": 495, "major": [488, 532, 546, 552], "make": [30, 90, 128, 150, 175, 180, 188, 190, 207, 380, 466, 474, 475, 477, 480, 488, 490, 494, 495, 496, 520, 522, 523, 530, 537, 538, 544, 546, 547, 548, 549, 551, 552, 554], "make_dquant_nod": 30, "make_matmul_weight_only_nod": 31, "make_modul": 1, "make_nam": 90, "make_nc_model": 1, "make_nod": 30, "make_onnx_inputs_output": 90, "make_onnx_shap": 90, "make_quant_nod": 30, "make_sub_graph": 29, "make_symbol_block": 1, "makeiter": [133, 385], "male": 466, "malici": [140, 396], "manag": [152, 449, 455, 538], "mandatori": [198, 199, 262, 538], "mani": [31, 234, 262, 433, 477, 481, 488, 522, 523, 534, 549, 554], "manipul": [87, 88], "manner": [523, 540], "manual": [211, 544], "mao": 521, "map": [1, 30, 90, 133, 140, 145, 160, 195, 225, 229, 231, 232, 234, 396, 427, 442, 457, 459, 477, 488, 494, 522, 526, 537, 538, 544, 546, 549], "map_kei": 234, "map_loc": [140, 396], "map_numpy_to_onnx_dtyp": 90, "map_onnx_to_numpy_typ": 90, "map_point": [230, 234, 537], "map_tensorflow_dtyp": 90, "mar": 545, "mark": 521, "marketplac": [494, 545], "mask": [169, 175, 177, 180, 182, 186, 187, 209, 230, 544, 555], "mask_padding_with_zero": 209, "massiv": 544, "master": [3, 177, 178, 188, 227, 228, 231, 232, 234, 262, 538, 554], "match": [63, 87, 140, 173, 230, 231, 335, 396, 406, 476, 481, 483, 494, 549], "match_datatype_pattern": 145, "math": [51, 323, 488, 546], "mathemat": [475, 480, 488, 552], "matmul": [16, 31, 38, 44, 50, 58, 79, 94, 113, 122, 179, 195, 303, 310, 316, 322, 330, 350, 359, 368, 377, 530, 549, 554], "matmul_weight_only_nod": 31, "matmulfpq4": 31, "matmulnbit": 31, "matmuloper": 18, "matric": [488, 552], "matrix": [31, 234, 262, 480, 488], "matter": [207, 548], "max": [30, 89, 128, 150, 195, 225, 231, 232, 266, 380, 413, 433, 439, 466, 473, 477, 481, 488, 494, 534, 538, 541, 544, 546, 549, 552, 553, 554], "max_answer_length": [225, 553], "max_dim": [225, 553], "max_filter_tensor": 466, "max_grad_norm": 538, "max_inclusive_opset_vers": 89, "max_input_chars_per_word": 224, "max_length": 209, "max_min_data": [74, 75, 346], "max_new_token": 489, "max_num_class": 230, "max_ord": 228, "max_output": 492, "max_query_length": [225, 553], "max_seq_length": [209, 225, 420, 553], "max_shard_s": 431, "max_sparsity_ratio_per_op": 
[175, 180, 195, 538, 544], "max_trial": [153, 195, 474, 482, 538, 554], "max_x": 413, "maxab": [439, 472], "maxabs_hw": [439, 472], "maxabs_hw_opt_weight": 472, "maxabs_pow2": 472, "maxim": [538, 544, 554], "maximum": [40, 153, 175, 180, 195, 209, 225, 228, 243, 266, 312, 390, 413, 425, 431, 477, 488, 497, 521, 538, 544, 546, 552, 553], "maxpool": [16, 114, 123, 297, 369, 378, 530], "maxpooling2d": 297, "maxpooloper": 19, "mbzuai": [475, 552], "mckinstri": 521, "md": [177, 178, 195, 234, 262], "md5": 211, "mean": [29, 31, 175, 184, 195, 221, 225, 232, 234, 271, 413, 425, 431, 441, 472, 477, 479, 488, 492, 496, 497, 523, 526, 530, 537, 538, 544, 546, 547, 549, 552, 553, 554], "mean_valu": [221, 553], "meaning": [538, 543], "meanwhil": 547, "measur": [133, 235, 385, 439, 461, 472, 477, 481, 488, 495, 520, 531, 537, 538, 549, 554], "measure_exclud": [439, 472], "mechan": [138, 169, 397, 470, 544, 551], "media": [490, 545], "median": [125, 283], "medium": [494, 545], "meet": [195, 201, 263, 265, 477, 479, 481, 482, 488, 492, 496, 542, 545, 546, 549, 554], "member": [490, 495], "memomeri": 139, "memori": [59, 145, 245, 331, 413, 433, 466, 472, 473, 474, 475, 477, 478, 480, 483, 488, 489, 521, 523, 525, 538, 539, 541, 542, 544, 546, 547, 549, 552, 554, 555], "mention": [477, 488, 544, 549], "merg": [93, 202, 278, 358, 387, 483, 554], "merge_duplicated_qdq": [91, 356], "mergeduplicatedqdqoptim": [93, 358], "mesa": 529, "messag": [90, 131, 383, 483, 491, 554], "met": [153, 482, 488, 530, 546], "meta": [81, 352, 426, 484, 489, 494, 536, 545], "meta_info": 426, "meta_op_optim": [80, 351], "metaclass": 274, "metadata": [140, 396], "metagraphdef": [243, 390], "metainfochangingmemopoptim": [81, 352], "metal": [494, 529, 534], "metaop": [81, 352], "meteor": 534, "method": [31, 126, 128, 138, 140, 145, 189, 195, 203, 207, 208, 209, 211, 218, 225, 278, 280, 286, 380, 392, 396, 397, 417, 431, 441, 455, 460, 462, 463, 472, 477, 478, 479, 481, 488, 492, 494, 521, 522, 523, 525, 537, 538, 543, 544, 546, 548, 549, 552, 553, 554], "meticul": [473, 541], "metric": [153, 162, 195, 198, 199, 226, 235, 262, 449, 455, 470, 480, 495, 496, 526, 538, 539, 546, 551, 552, 554, 555], "metric_cl": [234, 262, 538], "metric_criterion": 245, "metric_fn": [231, 232], "metric_max_over_ground_truth": [231, 232], "metric_registri": 234, "metric_typ": 234, "metric_weight": 245, "mha": [176, 183, 544], "mha_compress": 184, "mha_head_s": 184, "mha_modul": 184, "mha_nam": 184, "mha_scor": 184, "mha_spars": [171, 544], "mhacompress": 184, "microcod": 555, "microsc": 478, "microsoft": [3, 473, 489, 494, 495, 541], "middl": [488, 552], "migacz": 521, "might": [192, 474, 492, 522, 554], "migrat": [470, 475, 480, 488, 552], "mimic": [488, 546], "min": [128, 150, 195, 380, 433, 466, 473, 477, 481, 488, 541, 544, 546, 549, 552, 553, 554], "min_dim": [225, 553], "min_filter_tensor": 466, "min_max": 425, "min_sparsity_ratio_per_op": [175, 195, 538, 544], "min_train_sampl": 195, "min_x": 413, "mini": [489, 555], "minilm": [545, 555], "minim": [128, 150, 266, 380, 472, 476, 477, 481, 488, 496, 497, 521, 527, 537, 538, 544, 546, 554], "minimum": [175, 195, 413, 425, 448, 473, 497, 521, 538, 541, 544, 552, 554], "minmax": [1, 3, 303, 409, 413, 439, 476, 496, 497, 521, 530, 546], "minmax_file_path": 466, "minmax_lr": [418, 439, 452, 477], "minmaxcalibr": 3, "miou": 234, "misc": [138, 397, 535], "miss": [488, 544, 546], "mistral": 536, "mistralai": 536, "mitig": [477, 549], "mix": [134, 195, 235, 264, 278, 400, 437, 439, 470, 478, 494, 495, 501, 
531, 533], "mix_precis": [195, 226, 281, 538, 539], "mixed_precis": [235, 394, 538], "mixed_precision_entri": 437, "mixed_precision_model": 134, "mixedprecis": [195, 538], "mixedprecisionconfig": [195, 235, 281, 437, 439, 474, 538, 539], "mixin": 452, "mixprecisionconfig": 437, "ml": 545, "mla": [495, 539, 546], "mleffici": 545, "mlp": [478, 544], "mlperf": [545, 555], "mm": 555, "mnist": [211, 526], "mnli": [209, 537, 555], "mobil": [538, 555], "mobilebert": [209, 555], "mobilenet": [528, 551, 555], "mobilenetv2": 555, "mobiusml": [439, 477], "mod": 427, "mod_dict": 439, "mode": [28, 29, 30, 95, 140, 157, 161, 218, 230, 243, 278, 360, 390, 396, 404, 413, 437, 439, 442, 448, 465, 466, 472, 477, 478, 495, 496, 533, 546, 548, 549, 554], "model": [1, 2, 3, 16, 28, 29, 30, 31, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 54, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 101, 103, 125, 126, 127, 133, 134, 135, 139, 140, 141, 143, 144, 145, 149, 151, 153, 156, 162, 170, 171, 173, 180, 181, 182, 185, 187, 188, 190, 192, 195, 198, 199, 205, 208, 209, 211, 225, 226, 234, 235, 245, 256, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 280, 281, 283, 286, 288, 301, 302, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 326, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 385, 388, 390, 391, 392, 396, 398, 406, 408, 411, 412, 413, 415, 417, 420, 431, 432, 433, 435, 437, 438, 441, 442, 448, 449, 450, 457, 458, 459, 462, 465, 466, 468, 470, 471, 472, 473, 474, 477, 478, 479, 480, 481, 484, 488, 489, 491, 492, 495, 497, 501, 520, 521, 522, 523, 525, 526, 527, 531, 532, 533, 535, 537, 539, 542, 543, 545, 546, 550, 551, 553, 554], "model_attr": 156, "model_forward": [413, 433], "model_forward_per_sampl": 413, "model_info": 439, "model_level": 156, "model_loss": 554, "model_nam": [101, 195, 494], "model_name_or_path": [209, 431, 441, 489, 494, 538, 547], "model_origin": [195, 262, 548], "model_path": [125, 133, 283, 385, 420, 439, 477], "model_proto": 90, "model_slim": [170, 184], "model_slim_ffn2": 171, "model_slim_mha": 171, "model_state_dict_path": [477, 484], "model_typ": [209, 466], "model_wis": 538, "model_wrapp": [2, 136, 388], "modeling_util": 448, "modelproto": [31, 235, 457, 540], "models": [195, 245, 538, 542], "modelwisetuningsampl": 277, "modern": [477, 488, 549], "modif": [491, 495, 530], "modifi": [184, 211, 261, 280, 472, 492, 497, 526, 530, 544], "modified_pickl": [137, 395], "modul": [136, 137, 155, 158, 170, 176, 183, 395, 407, 410, 414, 424, 428, 434, 447, 470, 472, 474, 477, 478, 482, 489, 492, 494, 502, 522, 523, 531, 533, 537, 538, 540, 544, 548, 549], "module_debug_level1": 281, "module_hook_config": [145, 433], "module_nam": [141, 161, 169, 170, 180, 181, 182, 183, 185, 186, 187, 189, 191, 398, 448, 466], "module_name_list": [145, 433], "module_node_map": 459, "module_typ": 420, "module_wrapp": 400, "modulelist": 420, "mold": 153, "momentum": [169, 533, 538, 544], "momentumbalanc": 555, "momentumunbalanc": 555, "monitor": [153, 442, 478], "more": [29, 133, 149, 156, 177, 178, 225, 385, 413, 439, 470, 472, 473, 474, 475, 477, 478, 481, 488, 489, 493, 494, 496, 521, 522, 526, 528, 533, 534, 536, 538, 539, 541, 542, 543, 544, 548, 549, 552, 554, 555], "mosaicml": [475, 552], "mose": 227, "mosesdecod": 227, "mosh": 
544, "most": [195, 234, 472, 474, 477, 481, 488, 538, 539, 544, 546, 549, 552, 554, 555], "mostli": 522, "motiv": 489, "move": [63, 140, 335, 396, 413, 433, 448, 477, 488, 492, 549, 550], "move_input_devic": 145, "move_input_to_devic": [413, 433], "move_squeeze_after_relu": [61, 333], "movesqueezeafterreluoptim": [63, 335], "mp": 481, "mpi": 554, "mpirun": 554, "mpt": [475, 544, 552], "mrpc": [209, 234, 537, 544, 554, 555], "mscoco": 230, "mse": [31, 145, 195, 234, 262, 270, 466, 477, 488, 537, 549, 550], "mse_metric_gap": 466, "mse_v2": [195, 270], "mse_v2tunestrategi": 272, "mseloss": [488, 552], "msetunestrategi": 271, "msfp": [473, 541], "msft": 545, "msg": 463, "mt": 555, "mteval": 227, "mtl": 489, "much": [162, 169, 195, 225, 488, 552, 553], "mul": [40, 50, 51, 54, 149, 312, 322, 323, 326, 477, 528, 549, 552], "mullinear": [142, 429], "multi": [151, 154, 171, 173, 184, 230, 234, 262, 483, 523, 533, 538, 542, 544, 546, 554], "multi_object": 542, "multiclass": 537, "multilabel": 537, "multilingu": 555, "multimod": 448, "multiobject": 245, "multipl": [152, 165, 171, 196, 197, 201, 202, 209, 210, 212, 213, 214, 215, 216, 220, 222, 225, 235, 236, 237, 239, 245, 481, 520, 528, 531, 538, 543, 544, 551, 552, 554], "multipli": [3, 195, 471, 473, 477, 541, 549], "must": [225, 230, 452, 491, 492, 496, 520, 523, 534, 546, 553], "mx": [1, 402, 404, 439, 473, 478, 494, 531, 541], "mx_quant": 394, "mx_quant_entri": 437, "mx_spec": [403, 404], "mxfp4": [473, 541], "mxfp6": [473, 541], "mxfp8": [473, 541], "mxint8": [473, 541], "mxlinear": 403, "mxnet": [0, 1, 3, 195, 204, 208, 211, 214, 218, 225, 234, 235, 240, 262, 495, 496, 521, 523, 527, 530, 533, 538, 539, 540, 554], "mxnet_model": 237, "mxnetcifar10": 211, "mxnetcifar100": 211, "mxnetcropresizetransform": 225, "mxnetcroptoboundingbox": 225, "mxnetdataload": 204, "mxnetdataset": 211, "mxnetfashionmnist": 211, "mxnetfilt": 218, "mxnetimagefold": 211, "mxnetimagenetraw": 214, "mxnetmetr": 234, "mxnetmnist": 211, "mxnetmodel": 240, "mxnetnormalizetransform": 225, "mxnettransform": 225, "mxnettranspos": 225, "mxquantconfig": [437, 439, 473, 541], "mxquantiz": 403, "my": 491, "mydataload": [479, 481, 482], "n": [177, 178, 195, 210, 225, 228, 281, 488, 497, 522, 536, 544, 546, 552, 553], "n_best_siz": [225, 553], "n_bit": [488, 552], "n_block": [477, 549], "n_gpu": 538, "n_iter": 266, "n_pack": 444, "n_sampl": [31, 145, 413, 452, 477], "n_warmup": 266, "na": [195, 239, 481, 533, 555], "name": [1, 30, 39, 52, 53, 55, 89, 90, 95, 101, 125, 133, 135, 140, 141, 144, 145, 146, 151, 152, 153, 156, 160, 166, 169, 173, 175, 180, 183, 184, 188, 189, 190, 191, 192, 194, 195, 209, 210, 211, 214, 218, 223, 225, 229, 234, 239, 243, 245, 262, 278, 280, 283, 292, 293, 297, 311, 324, 325, 327, 360, 385, 390, 391, 396, 398, 403, 412, 413, 417, 418, 420, 427, 433, 439, 442, 443, 446, 448, 453, 455, 457, 458, 459, 466, 472, 473, 476, 477, 478, 479, 491, 492, 497, 522, 526, 528, 530, 535, 537, 538, 540, 541, 544, 546, 550, 551, 554, 555], "namecollector": 1, "named_paramet": 526, "namespac": 522, "namhoon": 544, "nan": [41, 313], "narrow": [473, 541], "narrow_rang": 98, "nasconfig": 195, "nation": [211, 490], "nativ": 497, "natur": [227, 477, 528], "nbest_predict": [225, 553], "nbit": 426, "nblock": [418, 439], "nbsp": 554, "nc": [551, 554], "nc_model": 1, "nc_resnet50_v1": 526, "nc_workspac": 195, "nchw": [39, 221, 311], "ncmodel": 1, "ndarrai": [1, 29, 30, 52, 53, 55, 225, 324, 325, 327, 444, 453, 529, 553], "ndarray_to_devic": 1, "nearest": [173, 225, 439, 477, 478, 
488, 549, 553], "nearst": 31, "necessari": [30, 188, 490, 496, 540, 544, 551, 554], "necessarili": [138, 397], "need": [1, 29, 90, 94, 151, 156, 173, 188, 195, 198, 199, 200, 207, 208, 211, 218, 225, 234, 235, 245, 262, 271, 359, 387, 404, 411, 413, 431, 441, 449, 459, 465, 466, 477, 480, 482, 484, 488, 489, 492, 495, 522, 523, 526, 529, 533, 538, 544, 546, 547, 548, 549, 550, 552, 553, 554], "need_appli": [305, 442], "need_spac": 413, "neelnanda": [418, 452], "neither": 546, "neo": 531, "neox": 536, "nepoch": 538, "nest": [145, 391, 417, 466], "nesterov": 538, "net": [241, 391, 494], "netflix": 545, "nets_factori": 237, "network": [135, 169, 175, 176, 269, 439, 448, 473, 474, 488, 521, 525, 528, 538, 541, 545, 546, 552, 554], "neural": [1, 135, 151, 154, 162, 165, 175, 176, 177, 178, 188, 195, 221, 222, 225, 226, 233, 234, 235, 239, 246, 262, 269, 270, 276, 290, 302, 303, 304, 305, 389, 392, 394, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 450, 452, 456, 468, 470, 472, 473, 474, 475, 476, 477, 478, 479, 481, 482, 483, 485, 488, 489, 491, 495, 496, 497, 520, 521, 522, 524, 525, 526, 527, 528, 529, 530, 531, 532, 535, 536, 539, 540, 541, 542, 543, 545, 546, 549, 550, 552, 553, 554, 555], "neural_compressor": [471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 489, 492, 494, 497, 520, 522, 523, 525, 526, 528, 531, 532, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 548, 550, 551, 552, 553, 554], "neurip": 545, "neuron": 544, "never": [140, 396, 554], "nevertheless": [473, 541], "new": [1, 3, 31, 133, 151, 200, 211, 225, 245, 385, 387, 392, 413, 433, 443, 452, 470, 474, 475, 476, 477, 488, 491, 496, 526, 538, 539, 544, 545, 546, 549, 550, 551, 553], "new_api": [33, 34, 56, 57, 62, 64, 77, 84, 116, 121, 128, 306, 328, 329, 334, 336, 348, 371, 376, 380], "new_dtyp": 30, "new_func": [133, 385], "new_graph_def": [133, 385], "new_in_featur": 444, "new_init": 31, "new_metr": 537, "new_modul": [141, 145, 398, 413, 433, 448], "new_quantized_nam": 30, "newapi": [79, 350], "newdataload": 523, "newli": [538, 544], "newlin": [232, 466], "newmetr": 537, "next": [45, 203, 281, 317, 387, 477, 488, 494, 496, 497, 523, 549, 554], "next_annotation_id": 230, "next_tune_cfg": 554, "nextplatform": 545, "nf4": [433, 477, 549], "nfl": 232, "ngram": [227, 537], "nhwc": [39, 221, 311], "ni_workload_nam": 195, "ninm": 176, "nll_loss": 526, "nlp": [188, 195, 474, 478, 481, 488, 544, 546], "nn": [141, 142, 145, 173, 174, 184, 194, 195, 235, 262, 398, 408, 412, 413, 417, 420, 427, 429, 431, 433, 435, 437, 438, 441, 442, 448, 459, 462, 465, 472, 477, 478, 488, 492, 538, 540, 549, 552], "nncf": 135, "no_absorb_lay": [145, 433], "node": [1, 29, 30, 31, 35, 39, 40, 41, 42, 45, 47, 49, 52, 53, 55, 59, 62, 65, 67, 68, 69, 83, 87, 88, 90, 95, 117, 125, 133, 154, 173, 243, 261, 283, 307, 311, 312, 313, 314, 317, 319, 321, 324, 325, 327, 331, 334, 337, 339, 340, 341, 354, 360, 372, 385, 390, 406, 413, 433, 443, 457, 459, 483, 495, 526, 547, 554, 555], "node1": 526, "node2": 526, "node_candidate_list": 406, "node_collector": 135, "node_def": [52, 53, 55, 324, 325, 327], "node_from_map": [52, 53, 55, 324, 325, 327], "node_index": 154, "node_list": 406, "node_map": [52, 53, 55, 324, 325, 327], "node_nam": [52, 53, 55, 133, 243, 251, 324, 325, 327, 390, 496], "node_name_from_input": [52, 53, 55, 324, 325, 327], "node_name_list": [131, 383], "node_op": 496, "node_set_from_user_config": 406, "nodedef": [52, 53, 55, 324, 325, 327], "non": [3, 466, 472, 474, 477, 492, 549, 554], "nondigit_punct_r": 227, 
"none": [1, 28, 30, 31, 32, 33, 34, 48, 74, 87, 89, 90, 95, 101, 127, 133, 135, 139, 140, 141, 142, 145, 151, 152, 153, 156, 161, 162, 163, 169, 170, 171, 173, 177, 188, 192, 195, 198, 199, 200, 202, 204, 207, 209, 210, 211, 212, 213, 214, 216, 221, 225, 230, 234, 235, 245, 256, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 278, 281, 284, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 320, 346, 360, 385, 387, 391, 392, 396, 398, 403, 404, 405, 413, 417, 418, 420, 423, 426, 429, 431, 432, 433, 435, 438, 439, 441, 442, 448, 449, 452, 453, 455, 458, 459, 462, 465, 466, 472, 473, 477, 478, 481, 483, 489, 494, 495, 496, 523, 537, 541, 546, 549, 553, 554], "nor": 546, "norm": [16, 477], "normal": [3, 20, 225, 232, 477, 488, 538, 542, 549, 552, 553], "normalfloat": [477, 549], "normalizationoper": 20, "normalize_answ": 232, "normalizetftransform": 225, "normalizetransform": 225, "not_use_best_ms": [418, 439, 477], "notat": [195, 466], "note": [40, 138, 153, 179, 230, 272, 312, 392, 397, 470, 471, 472, 474, 475, 476, 477, 480, 483, 488, 489, 494, 496, 497, 522, 523, 526, 528, 530, 531, 534, 536, 538, 544, 546, 549, 552, 554], "notebook": 470, "noteworthi": 521, "noth": [230, 554], "notic": [128, 150, 380, 474, 477, 535, 539, 550], "notimplementederror": 448, "nov": 545, "novel": 525, "now": [195, 225, 489, 492, 497, 526, 553, 554], "np": [30, 225, 266, 526, 552, 553, 554], "np_dtype": 90, "npu": [195, 546], "npy": 210, "npy_dir": 210, "npz": 211, "nr": 90, "nsampl": [418, 420, 439, 448, 549], "nsdf3": 211, "nuanc": 477, "num": [433, 466, 489], "num_beam": 489, "num_bin": [1, 3, 453], "num_bit": [31, 98, 142, 145, 413, 429, 488, 552], "num_c": 483, "num_class": 234, "num_cor": [209, 210, 214], "num_cores_on_numa": 483, "num_cores_per_inst": [154, 483], "num_correct": 234, "num_cpu": 154, "num_detect": [230, 234, 537, 538], "num_gt_box": 230, "num_i": 483, "num_inst": [154, 483], "num_of_inst": [151, 195, 520, 538], "num_of_process": 526, "num_quantized_bin": 3, "num_replica": 526, "num_sampl": 234, "num_train_epoch": [538, 544], "num_work": [200, 202, 204, 387, 523, 538, 546], "numa": [154, 483], "numa_index": 154, "numa_info": 154, "numa_node_index": 154, "numactl": [151, 154, 529], "numba": 444, "number": [3, 31, 52, 53, 90, 125, 145, 152, 153, 161, 171, 177, 178, 195, 207, 210, 221, 225, 231, 232, 234, 257, 261, 266, 281, 283, 324, 325, 387, 413, 418, 426, 433, 443, 448, 455, 466, 471, 477, 482, 483, 488, 520, 523, 526, 537, 544, 546, 549, 551, 552, 553, 554], "number_of_process": 554, "numer": [195, 234, 473, 474, 477, 481, 497, 539, 541, 545, 546, 549], "numpi": [29, 30, 52, 53, 55, 90, 133, 225, 230, 324, 325, 327, 444, 529, 552, 553], "nvidia": [494, 521, 533, 534, 555], "nxm": [176, 186, 195, 544], "o": [140, 281, 396, 494, 522, 534], "o0": [267, 554], "o1": 554, "obj": [1, 90, 170, 192, 208, 235, 245, 262, 449, 455, 466], "obj1": 466, "obj2": 466, "obj_cl": 245, "obj_criterion": 245, "obj_weight": 245, "object": [1, 29, 30, 31, 59, 71, 90, 101, 117, 133, 134, 135, 138, 140, 144, 145, 151, 153, 160, 162, 169, 170, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 195, 198, 199, 200, 209, 210, 211, 225, 226, 230, 234, 235, 243, 244, 257, 261, 262, 266, 280, 331, 343, 372, 385, 387, 390, 391, 396, 397, 399, 406, 409, 411, 413, 417, 433, 448, 449, 452, 455, 465, 466, 470, 476, 478, 481, 492, 496, 501, 520, 522, 523, 529, 537, 538, 540, 543, 544, 546, 553, 554, 555], "object_detect": [195, 230], 
"objective_cfg": 245, "objective_cl": 245, "objective_custom_registri": 245, "objective_registri": 245, "oblig": 490, "observ": [145, 417, 439, 442, 471, 472, 476, 477, 478, 497, 549, 551, 554], "obstacl": [473, 541], "obtain": [171, 173, 182, 187, 189, 192, 488, 492, 496, 544, 551, 552, 554], "occupi": [473, 541], "occur": 476, "ocp": [473, 541], "oct": 545, "off": [3, 52, 53, 55, 324, 325, 327, 477, 488, 491, 549], "offens": 490, "offer": [473, 477, 481, 541], "offici": [133, 227, 231, 232, 385, 490, 552], "offlin": [466, 475, 480, 481, 488, 490, 538, 546, 552], "offset_height": [225, 553], "offset_width": [225, 553], "ofir": 544, "often": [192, 477, 523, 543, 544], "old": [391, 466, 475, 538, 550], "old_hist": [391, 466], "oliv": [494, 545], "omit": [472, 549], "omp": 489, "omp_num_thread": 484, "ompi_mca_btl_vader_single_copy_mechan": 494, "on_after_compute_loss": [449, 525, 538, 543], "on_after_optimizer_step": [538, 544], "on_before_optimizer_step": [449, 525, 538, 543, 544], "on_epoch_begin": [449, 455, 525, 538, 543], "on_epoch_end": [449, 455, 525, 538, 543], "on_step_begin": [449, 455, 525, 538, 543, 544], "on_step_end": [449, 455, 525, 538, 543], "on_train_begin": [449, 492, 525, 538, 543, 544, 546], "on_train_end": [449, 525, 538, 543, 544, 546], "onc": [133, 140, 165, 190, 263, 265, 385, 396, 481, 489, 496, 497, 523, 544, 545, 554, 555], "one": [31, 94, 95, 100, 140, 145, 151, 175, 179, 184, 187, 188, 190, 225, 227, 230, 234, 359, 360, 396, 413, 417, 433, 474, 477, 478, 481, 482, 483, 488, 495, 521, 525, 526, 530, 533, 534, 537, 538, 539, 542, 543, 544, 546, 547, 549, 550, 552, 553, 554], "oneapi": [470, 474, 489, 534, 545], "onednn": [474, 481, 539, 546], "onednn_max_cpu_isa": 474, "ones": [140, 230, 396, 544], "oneshotschedul": 190, "onli": [29, 31, 39, 48, 55, 71, 94, 100, 128, 140, 149, 150, 151, 165, 170, 176, 183, 195, 209, 262, 272, 280, 281, 311, 320, 327, 343, 359, 380, 396, 413, 428, 429, 431, 432, 433, 439, 441, 459, 466, 470, 472, 474, 478, 482, 483, 489, 495, 496, 497, 520, 521, 522, 523, 526, 529, 530, 531, 536, 538, 539, 544, 545, 546, 547, 552, 554], "onlin": [477, 490, 551], "onnx": [2, 3, 16, 28, 30, 31, 72, 127, 149, 195, 205, 235, 242, 257, 457, 458, 459, 464, 491, 494, 495, 496, 498, 523, 527, 528, 530, 533, 539, 540, 541, 545, 549, 552, 554], "onnx_graph": 86, "onnx_ml_pb2": [235, 540], "onnx_model": [237, 256], "onnx_nod": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 86], "onnx_qlinear_to_qdq": 457, "onnx_quant": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27], "onnx_schema": 86, "onnx_typ": 90, "onnxbilinearimagenettransform": 221, "onnxcommunitymeetup2023": 545, "onnxgraph": 87, "onnxmodel": [31, 242, 256], "onnxnod": 88, "onnxopschema": 89, "onnxprofilingpars": 249, "onnxqlinear2qdq": 195, "onnxqlinear2qdqconfig": 195, "onnxresizecropimagenettransform": 221, "onnxrt": [29, 30, 31, 209, 211, 218, 225, 234, 256, 464, 495, 539, 546, 555], "onnxrt_cuda_ep": [195, 539, 546], "onnxrt_dataload": 256, "onnxrt_dml_ep": [195, 546], "onnxrt_dnnl_ep": [195, 539, 546], "onnxrt_integ": 538, "onnxrt_integerop": [211, 218, 225], "onnxrt_qdq": [211, 218], "onnxrt_qlinear": 538, "onnxrt_qlinearop": [211, 218, 225], "onnxrt_qlinearopsadaptor": 495, "onnxrt_trt_ep": [195, 539, 546], "onnxrtaug": 2, "onnxrtbertdataload": 205, "onnxrtbertdataset": 209, "onnxrtcroptoboundingbox": 225, "onnxrtdataload": [205, 256], "onnxrtglu": 234, "onnxrtimagenetdataset": 214, "onnxrtitdataset": 211, "onnxrtitfilt": 
218, "onnxrtitmetr": 234, "onnxrtittransform": 225, "onnxrtparserfactori": 248, "onnxrtqldataset": 211, "onnxrtqlfilt": 218, "onnxrtqlmetr": 234, "onnxrtqltransform": 225, "onnxruntim": [3, 195, 205, 211, 218, 257, 495, 521, 523, 533, 534, 539], "onto": [140, 396], "op": [1, 16, 36, 38, 50, 51, 52, 53, 54, 55, 56, 57, 59, 63, 66, 70, 73, 76, 77, 78, 79, 81, 90, 92, 94, 108, 109, 110, 116, 118, 119, 120, 121, 126, 128, 131, 133, 135, 144, 145, 149, 173, 195, 257, 261, 267, 269, 271, 278, 279, 280, 286, 308, 310, 322, 323, 324, 325, 326, 327, 328, 329, 331, 335, 338, 342, 345, 347, 348, 349, 350, 352, 357, 359, 363, 364, 365, 371, 373, 374, 375, 376, 380, 383, 385, 412, 413, 417, 433, 437, 448, 457, 459, 461, 466, 474, 475, 478, 488, 489, 495, 496, 497, 530, 538, 539, 544, 546, 548, 550, 552, 554], "op_block_lst": 277, "op_cfg": 139, "op_defin": 251, "op_dict": 538, "op_dtyp": 277, "op_dtype_dict": 277, "op_infos_from_cfg": [145, 412, 413, 417], "op_level": 156, "op_list": 495, "op_nam": [145, 195, 279, 413, 417, 433, 448, 466, 467, 476, 496, 538, 544], "op_name_dict": [195, 476, 538, 546, 554], "op_name_or_module_typ": [152, 299, 303, 439], "op_name_typ": 278, "op_quant_mod": 279, "op_registri": 21, "op_run": 251, "op_typ": [21, 125, 126, 133, 194, 279, 283, 286, 303, 413, 417, 475, 476], "op_type_dict": [195, 476, 497, 546, 549, 554], "op_type_level": 156, "op_types_to_quant": 28, "op_user_cfg": 280, "op_user_cfg_modifi": 280, "op_wis": 538, "op_wise_config": [92, 116, 121, 357, 371, 376, 496], "op_wise_sequ": [116, 121, 371, 376], "open": [140, 226, 396, 455, 468, 490, 494, 528, 529, 538, 545], "openai": [475, 552], "opencv": 529, "opentri": 466, "openvinotoolkit": 135, "oper": [4, 30, 89, 95, 125, 133, 152, 156, 173, 174, 179, 195, 221, 257, 261, 283, 299, 360, 385, 406, 413, 471, 472, 474, 475, 476, 477, 478, 479, 480, 481, 488, 489, 492, 520, 523, 528, 544, 546, 549, 552, 553, 554], "operator_name_or_list": 478, "operatorconfig": [299, 439], "ops_lst": [145, 417], "ops_nam": [145, 417], "opset": [29, 87, 89, 90, 195, 458, 459, 528], "opset_vers": [90, 127, 195, 458, 459, 528], "opt": [188, 475, 488, 489, 494, 536, 544, 552, 554, 555], "opt_cfg": [60, 332], "opt_model": [471, 476, 538], "opt_param": 425, "optdecoderlay": 552, "optim": [39, 59, 60, 64, 65, 101, 103, 133, 164, 167, 168, 170, 173, 182, 187, 195, 266, 311, 331, 332, 336, 337, 385, 424, 439, 449, 451, 453, 466, 470, 476, 477, 480, 481, 482, 484, 488, 489, 494, 520, 522, 525, 526, 530, 533, 534, 536, 538, 540, 545, 546, 548, 549, 551, 552, 554], "optimize_lay": 102, "optimize_qdq": [115, 370], "optimize_transform": 489, "optimize_weights_proximal_legaci": 425, "optimized_model_tensor": 466, "optimized_tensor_data": 467, "optimizedmodel": 462, "optimizeqdqgraph": [116, 371], "optimizer_registri": 165, "optimizer_typ": 165, "optimum": [448, 472, 549], "option": [3, 31, 90, 140, 145, 156, 161, 175, 195, 198, 199, 209, 225, 230, 234, 235, 262, 278, 280, 281, 391, 392, 396, 398, 408, 409, 413, 415, 418, 425, 431, 433, 435, 437, 438, 439, 441, 442, 448, 449, 458, 459, 460, 462, 466, 477, 478, 481, 488, 496, 497, 522, 523, 530, 534, 538, 544, 546, 549, 551, 552, 553, 554], "optuningconfig": [277, 278, 279], "optyp": [1, 195, 457, 496], "optype_wise_": 496, "optypes_to_exclude_output_qu": [28, 195, 546], "optypewis": 496, "optypewisetuningsampl": 277, "opwis": 496, "opwisetuningsampl": 277, "orchestr": [449, 470, 533], "order": [139, 145, 153, 227, 228, 266, 271, 280, 477, 482, 488, 489, 492, 537, 549, 552, 554], "ordered_op": 145, 
"ordereddefaultdict": 280, "ordereddict": [403, 412, 416, 419, 430, 522], "ordinari": 551, "org": [169, 187, 230, 420, 439, 492, 494, 534, 544], "orient": 490, "orig_answer_text": 225, "orig_bit": 444, "orig_lay": [142, 429], "orig_model": 477, "orig_sav": 478, "orig_text": 225, "origin": [30, 31, 125, 133, 141, 145, 173, 185, 192, 195, 209, 225, 267, 280, 283, 385, 398, 413, 427, 431, 433, 441, 442, 448, 462, 466, 477, 478, 479, 481, 488, 491, 544, 546, 549, 553, 554], "original_model": [431, 441, 477], "ort": 257, "ortsmoothqu": 29, "other": [52, 53, 55, 138, 149, 208, 232, 243, 324, 325, 327, 390, 397, 413, 473, 478, 481, 488, 490, 492, 496, 497, 502, 522, 530, 531, 533, 535, 538, 540, 541, 542, 544, 546, 552, 553, 554, 555], "otherwis": [140, 211, 225, 396, 406, 413, 427, 448, 452, 466, 477, 490, 496, 549, 553], "ouput_dir": 547, "our": [128, 145, 195, 380, 413, 473, 489, 494, 528, 538, 541, 551], "out": [178, 195, 209, 210, 211, 412, 413, 477, 479, 481, 488, 491, 492, 494, 544, 546, 549], "out_dtyp": 439, "out_featur": [403, 423, 429], "out_graph_def": [133, 385], "out_graph_fil": [133, 385], "outcom": 234, "outer": [202, 387], "outlier": [125, 283, 475, 477, 480, 488, 496, 521, 549, 552], "outlin": [496, 497], "outofcheeseerror": 522, "outperform": 477, "output": [29, 30, 31, 36, 39, 44, 59, 83, 90, 95, 133, 145, 166, 173, 174, 179, 192, 195, 198, 199, 227, 234, 235, 243, 262, 308, 311, 316, 331, 354, 360, 385, 390, 391, 408, 413, 415, 417, 425, 431, 433, 439, 442, 449, 458, 459, 462, 463, 466, 472, 477, 478, 479, 488, 489, 492, 495, 496, 525, 526, 528, 530, 532, 537, 538, 540, 543, 544, 546, 549, 552, 553, 554], "output_data": 29, "output_dict": 31, "output_dir": [139, 408, 415, 431, 462, 478, 538], "output_fn": 225, "output_func": [145, 433], "output_graph": 195, "output_graph_def": 480, "output_handl": [161, 466], "output_index_map": [234, 537], "output_mod": 209, "output_model": [281, 538], "output_nam": [87, 127, 195, 243, 390, 458, 459, 528], "output_node_nam": [68, 69, 116, 117, 121, 133, 340, 341, 371, 372, 376, 385], "output_path": 230, "output_process": 166, "output_shap": 87, "output_tensor": [133, 243, 385, 390], "output_tensor_id_op_nam": [412, 413, 417], "output_tensor_ids_op_nam": [145, 413, 417], "output_tensor_nam": [133, 243, 385, 390], "output_valu": [145, 433], "outputs_to_valu": 90, "over": [90, 140, 211, 234, 396, 496, 527, 533, 537, 544, 552, 554], "overal": [477, 497, 549], "overflow": [488, 546], "overhead": [477, 552], "overli": 522, "overrid": [195, 466, 478, 489, 522], "overridden": 554, "overview": [494, 531, 554], "overwrit": 211, "overwrite_exist": 151, "own": [59, 140, 207, 225, 245, 331, 396, 470, 488, 496, 523, 533, 537, 542, 549, 551], "p": [3, 477, 484, 488, 489, 549], "p_conf": [538, 543], "pack": [30, 46, 318, 421, 426, 444], "pack_array_with_numba_b2_c16": 444, "pack_array_with_numba_b2_c32": 444, "pack_array_with_numba_b2_c64": 444, "pack_array_with_numba_b2_c8": 444, "pack_array_with_numba_b4_c16": 444, "pack_array_with_numba_b4_c32": 444, "pack_array_with_numba_b4_c64": 444, "pack_array_with_numba_b4_c8": 444, "pack_array_with_numba_b8_c16": 444, "pack_array_with_numba_b8_c32": 444, "pack_array_with_numba_b8_c64": 444, "pack_array_with_numba_b8_c8": 444, "packag": [89, 446, 448, 494, 495, 522, 529, 534, 545, 550, 554], "package_nam": 446, "packed_arrai": 444, "packer": 421, "pad": [16, 31, 56, 57, 209, 225, 292, 294, 297, 298, 328, 329, 553], "pad_max_length": 549, "pad_tensor": 31, "pad_token": 209, "pad_token_segment_id": 209, 
"paddedcentercroptransform": 225, "padding_mod": 553, "pade": 31, "padoper": 22, "page": [490, 491], "pager": 522, "pain": [488, 546], "pair": [92, 195, 357, 406, 474, 476, 496, 523, 538, 539], "paper": [475, 477, 488, 544, 549, 552], "paragraph": [231, 232], "parallel": [210, 257, 261, 538, 554], "param": [145, 151, 154, 156, 165, 200, 234, 266, 387, 466, 472, 495, 540, 554], "param1": 281, "param2": 281, "param3": 281, "param_alia": 466, "param_dict": [163, 165], "param_nam": [398, 466], "paramet": [1, 3, 29, 30, 31, 52, 53, 55, 71, 90, 101, 117, 125, 126, 133, 134, 135, 140, 141, 144, 145, 146, 151, 152, 153, 154, 156, 160, 161, 162, 163, 165, 167, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 208, 209, 211, 218, 221, 225, 227, 228, 230, 231, 232, 234, 235, 239, 243, 245, 257, 261, 262, 266, 274, 278, 280, 281, 283, 286, 301, 305, 324, 325, 327, 343, 372, 385, 390, 391, 396, 398, 404, 406, 408, 409, 411, 412, 413, 415, 417, 418, 420, 422, 425, 427, 431, 433, 435, 437, 438, 439, 441, 442, 443, 446, 448, 449, 455, 457, 458, 459, 462, 463, 465, 466, 473, 475, 476, 477, 480, 481, 483, 488, 492, 496, 523, 526, 533, 537, 538, 541, 544, 546, 547, 549, 551, 552, 553, 554], "parameter": 544, "parameter1": 281, "parameter2": 281, "paramlevel": 156, "params_list": [152, 156], "parent": [185, 186, 190, 413, 433], "pars": [90, 133, 145, 154, 209, 210, 221, 225, 249, 250, 253, 385, 390, 412, 413, 417, 496, 497, 553], "parse_auto_slim_config": [171, 544], "parse_cfg": 417, "parse_last_linear": 192, "parse_last_linear_tf": 192, "parse_saved_model": [133, 385], "parse_str2list": 154, "parse_to_prun": 192, "parse_to_prune_tf": 192, "parse_tune_config": 1, "parse_valid_pruner_typ": 183, "parsedecodebert": 209, "parsedecodecoco": 210, "parsedecodeimagenet": [221, 553], "parsedecodeimagenettransform": 221, "parsedecodevoctransform": 225, "parserfactori": 247, "part": [145, 171, 180, 433, 470, 474, 488, 492, 534, 539, 544, 547, 552], "parti": [230, 488, 491, 535, 546], "partial": [184, 481, 544, 546], "particip": 490, "particular": [489, 494, 544, 549], "particularli": 481, "partit": [526, 544], "partner": 545, "parzen": 554, "pascal": 211, "paser_cfg": 145, "pass": [62, 133, 138, 140, 151, 161, 234, 262, 334, 385, 391, 396, 397, 411, 413, 431, 441, 465, 466, 477, 488, 491, 492, 495, 520, 525, 526, 532, 537, 538, 544, 546, 548, 552], "past": [477, 488, 495, 549], "pat": 545, "patch": 427, "patch_hqq_moduil": 427, "path": [90, 125, 133, 141, 145, 160, 173, 188, 192, 209, 210, 211, 225, 235, 243, 261, 262, 278, 283, 385, 390, 398, 405, 412, 413, 415, 416, 417, 431, 442, 458, 459, 462, 466, 472, 476, 477, 478, 484, 492, 495, 526, 532, 537, 538, 539, 540, 543, 549, 553], "path_to_sav": 449, "pathlik": [140, 396], "pattern": [43, 44, 46, 63, 81, 92, 93, 94, 116, 121, 145, 169, 170, 173, 174, 180, 181, 182, 183, 185, 187, 189, 195, 211, 278, 315, 316, 318, 335, 352, 357, 358, 359, 371, 376, 406, 471, 476, 480, 483, 495, 522, 530, 533, 538, 543, 552, 554, 555], "pattern_analyz": 172, "pattern_detector": 136, "pattern_factori": 406, "pattern_lock": [183, 195], "pattern_lst": [143, 417], "pattern_pair": 406, "pattern_to_intern": 278, "pattern_to_path": 278, "patternmha": 177, "patternpair": 406, "pb": [151, 195, 235, 243, 262, 390, 481, 520, 526, 538, 540, 555], "pbound": 266, "pc": 491, "pdf": 3, "peak": [245, 483, 542], "pegasu": 555, "peleenet": 555, "penal": 544, "penalti": [227, 228, 537], "pend": 529, "pentium": 535, 
"peopl": [488, 546], "pep": [281, 522], "per": [29, 30, 31, 149, 151, 195, 398, 413, 433, 437, 461, 472, 473, 475, 477, 483, 495, 497, 523, 541, 544, 548, 549, 555], "per_channel": [98, 409, 439, 496, 497, 530, 546], "per_channel_symmetr": 497, "per_tensor": [292, 293, 294, 297, 298, 299, 303, 409, 439, 479, 496, 497, 530, 546], "per_tensor_symmetr": 497, "percdamp": [31, 439, 477, 549], "percent": 31, "percentag": [232, 477, 521, 549], "percentil": [3, 31, 125, 283, 303, 433, 521], "percentilecalibr": 3, "perceptron": 544, "perchannel": 31, "perform": [81, 125, 151, 189, 195, 209, 221, 234, 245, 262, 267, 271, 283, 284, 352, 413, 471, 472, 474, 476, 477, 478, 479, 481, 482, 483, 484, 488, 489, 491, 492, 494, 495, 496, 520, 524, 525, 527, 528, 531, 533, 534, 537, 538, 539, 540, 542, 543, 544, 545, 546, 547, 548, 549, 552, 554, 555], "performance_onli": [32, 33, 34, 92, 116, 121, 132, 289, 306, 357, 371, 376, 384, 538], "perm": [225, 553], "perman": [171, 490, 544], "permiss": 490, "permut": [225, 553], "persist": 540, "person": [391, 466, 490, 545], "perspect": 554, "phase": [146, 448, 481, 488, 538, 544, 546, 548, 554], "phi": [489, 535], "philip": 544, "philosophi": [476, 481, 546], "physic": [151, 154, 490, 520], "physical_cpu": 154, "pickl": [138, 140, 396, 397], "pickle_load_arg": [140, 396], "pickle_modul": [140, 170, 396], "pickle_protocol": 170, "pickleerror": [138, 397], "pickler": [138, 397], "pickletool": [138, 397], "picklingerror": [138, 397], "piec": [224, 227, 477, 488, 537, 549], "pil": [225, 553], "pile": [418, 452], "pin": 523, "pin_memori": [200, 202, 204, 387, 523], "ping_memori": [538, 546], "pip": [494, 526, 529, 531, 534, 550], "pipe": 522, "pipelin": [153, 162, 491, 525, 538, 543], "pixel": 553, "pkl": 466, "pl": 227, "place": [412, 413, 433, 442, 478, 525, 534, 544, 554], "placehold": [42, 207, 314, 387, 409, 522], "placeholder_dtyp": 173, "placeholder_shap": 173, "plai": [236, 390, 477, 488, 545, 549, 552], "plan": [478, 533], "platform": [466, 483, 488, 533, 545, 546], "platinum": 555, "pleas": [29, 135, 149, 169, 177, 178, 179, 187, 188, 195, 209, 210, 211, 214, 234, 262, 281, 413, 420, 470, 472, 474, 475, 476, 477, 478, 479, 480, 488, 489, 492, 493, 494, 495, 520, 521, 525, 526, 528, 534, 536, 537, 539, 544, 546, 548, 549, 550, 551, 552, 554], "plu": 539, "plug": 545, "png": 211, "point": [30, 31, 221, 231, 232, 266, 425, 433, 466, 471, 472, 473, 474, 475, 476, 477, 480, 488, 537, 541, 546, 549, 552, 553, 554], "pointwise_constraint": 298, "pointwise_initi": 298, "pointwise_regular": 298, "polici": [263, 265, 271, 482, 490, 494, 556], "polit": 490, "pollut": 522, "pont": 534, "pool": 16, "pool2d": 295, "pool_siz": 297, "pooloper": 23, "poor": 478, "popen": 522, "popular": [226, 468, 470, 478, 482, 488, 494, 495, 496, 521, 527, 528, 536, 538, 544, 546, 549, 554], "popularli": 537, "port": [52, 53, 55, 324, 325, 327], "portabl": [138, 397], "portion": 209, "pose": [473, 541, 547], "posit": [225, 442, 490, 537], "possibl": [140, 396, 406, 472, 543, 544, 547, 548, 554], "post": [29, 82, 83, 149, 187, 195, 198, 199, 262, 353, 354, 413, 420, 439, 473, 475, 476, 477, 478, 480, 482, 488, 490, 494, 495, 496, 525, 526, 528, 533, 534, 541, 544, 545, 547, 549, 552, 554], "post_batch": 1, "post_hostconst_convert": [80, 351], "post_node_nam": [62, 334], "post_quantized_op_cs": [80, 351], "post_training_auto_qu": 554, "post_training_dynamic_qu": [538, 554], "post_training_static_qu": [538, 554], "postcompressionutil": 174, "postcseoptim": [83, 354], "posterior": 554, 
"postfix": [74, 75, 346], "posthostconstconvert": [82, 353], "postposttrainingquantconfig": 548, "postprocess": [195, 222, 225, 234, 262, 455, 495, 538, 553], "postprocess_cfg": 455, "postprocess_cl": [223, 538], "postprocess_model": 448, "posttrainingquantconfig": [195, 262, 492, 497, 523, 528, 537, 538, 540, 546, 547, 548, 549, 551, 552, 554], "power": [472, 473, 481, 538, 541, 545], "pp": 544, "pr": [491, 523, 537, 550], "practic": [473, 541], "pre": [64, 101, 141, 173, 198, 199, 209, 235, 262, 336, 398, 439, 449, 476, 477, 482, 488, 522, 525, 538, 544, 545, 546, 549, 554], "pre_batch": 1, "pre_node_nam": [62, 334], "pre_optim": [61, 333], "pre_post_process_quant": [195, 546], "pre_process": 526, "preced": [227, 488, 552, 554], "precis": [134, 195, 198, 199, 232, 234, 235, 245, 264, 267, 278, 289, 399, 400, 401, 406, 437, 439, 466, 470, 472, 473, 476, 477, 478, 482, 488, 494, 495, 496, 501, 520, 521, 525, 526, 530, 531, 533, 541, 545, 546, 549, 552, 554], "pred": [133, 234, 385, 537], "pred_list": 234, "pred_text": 225, "predefin": 482, "predict": [133, 225, 227, 231, 232, 234, 262, 385, 477, 488, 537, 549, 552, 553], "prefer": [151, 262, 477, 488, 544, 549], "prefix": [133, 140, 141, 144, 145, 151, 154, 385, 396, 398, 433, 472], "preoptim": [64, 336], "prepar": [1, 31, 145, 392, 412, 415, 431, 437, 442, 448, 471, 472, 473, 475, 476, 477, 478, 481, 484, 488, 492, 494, 526, 531, 544, 546], "prepare_compress": [195, 449, 492, 525, 538, 543, 544, 546], "prepare_dataload": 1, "prepare_input": 31, "prepare_model": 1, "prepare_model_data": 1, "prepare_prun": [170, 544], "prepared_model": [471, 475, 476, 477, 484, 531], "preprint": [473, 477, 488, 521, 541, 544, 549, 552], "preprocess": [145, 195, 209, 225, 280, 433, 455, 526, 553], "preprocess_user_cfg": 280, "present": [52, 53, 55, 324, 325, 327, 488, 545, 552], "preserv": [169, 473, 477, 521, 541, 544, 549], "pretrain": [420, 462], "pretrained_model_name_or_path": [139, 141, 398, 448], "prettyt": 466, "preval": [477, 488, 549], "previou": [192, 470, 478, 488, 496, 497, 538, 544, 550, 552, 554], "previous": 523, "primari": [482, 521, 554], "primit": [140, 396], "print": [62, 135, 173, 234, 281, 334, 420, 425, 466, 483, 488, 489, 526, 537, 538, 544, 552, 554], "print_iter": 173, "print_op_list": 466, "print_tabl": 466, "printer": [161, 466], "prior": [1, 73, 345, 554], "prioriti": [152, 443, 446, 522], "privat": 490, "prob": 492, "probabl": [3, 195, 461, 477, 488, 549], "problem": [138, 209, 234, 397, 474, 537, 539], "proce": [181, 182, 187], "procedur": [544, 554], "proceed": [488, 552], "process": [101, 125, 135, 151, 153, 159, 161, 175, 180, 181, 182, 188, 190, 192, 195, 198, 199, 209, 210, 211, 221, 225, 235, 262, 263, 265, 266, 283, 412, 413, 448, 449, 465, 466, 471, 472, 473, 476, 477, 480, 481, 482, 484, 488, 489, 495, 496, 497, 521, 523, 525, 526, 528, 538, 541, 543, 544, 546, 547, 549, 552, 553], "process_and_check_config": 192, "process_config": 192, "process_weight_config": 192, "process_yaml_config": 192, "processor": [161, 439, 448, 474, 484, 488, 489, 494, 536, 539, 545, 546, 548], "processor_typ": [439, 484], "processortyp": [161, 439, 448], "product": [169, 474, 488, 533, 539, 545, 546, 551, 554], "profession": 490, "profil": [151, 195, 226], "profilerfactori": [254, 255, 259], "profilingpars": 250, "profilingresult": 251, "program": [476, 494, 526, 535], "progress": [30, 183, 211, 544, 546], "project": [225, 469, 490, 491, 545, 551, 554, 556], "promis": [472, 525, 538, 544], "promot": [473, 477, 541], "prompt": [489, 526], 
"prone": 544, "propag": [1, 85, 355], "properti": [209, 280, 535], "proport": 234, "propos": [474, 477, 488, 495, 539, 549, 552], "protect": [477, 549], "protected_nod": [65, 337], "proto": [221, 225, 553], "protobuf": [90, 243, 390], "prototyp": 492, "prove": [477, 488, 521, 549, 552], "provid": [29, 30, 31, 87, 90, 95, 173, 198, 199, 225, 230, 235, 262, 360, 409, 413, 448, 449, 460, 462, 470, 472, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 488, 489, 494, 495, 496, 497, 520, 523, 525, 526, 527, 531, 533, 534, 536, 537, 538, 539, 540, 544, 546, 549, 552, 554, 555], "proxi": 492, "prune": [162, 169, 170, 171, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 189, 190, 191, 192, 195, 466, 470, 494, 526, 527, 530, 531, 533, 543, 545], "prune_conf": 538, "prune_config": 192, "pruner": [538, 544], "pruner2": 544, "pruner_class": 187, "pruner_info": 188, "pruners_info": 192, "pruning_class": 188, "pruning_config": [195, 538, 544], "pruning_end": 544, "pruning_frequ": [180, 195, 538, 544], "pruning_func": 538, "pruning_op_typ": [195, 538, 544], "pruning_pattern": 544, "pruning_scop": [195, 538, 544], "pruning_start": 544, "pruning_typ": [195, 538, 544], "pruningcallback": 162, "pruningconfig": 449, "pruningcriterion": [169, 191], "pruningschedul": 190, "pseudo": [198, 199, 262, 449, 477, 549], "pt": [140, 396, 465, 477, 489, 494, 531, 534, 540, 545, 546, 548, 549], "pt2e": [405, 407, 409, 435, 437, 441, 555], "pt2e_dynamic_quant_entri": 437, "pt2e_export": 434, "pt2e_quant": 394, "pt2e_static_quant_entri": 437, "pt_fp32_model": 459, "pt_int8_model": 459, "ptq": [195, 262, 475, 476, 480, 481, 492, 526, 533, 538, 546, 552, 554], "public": [281, 490], "publish": [474, 490, 535, 536, 539, 552], "pull": [52, 53, 55, 324, 325, 327], "punct_nondigit_r": 227, "punctuat": [224, 227, 232], "pure": 544, "purif": 545, "purpos": [474, 475, 476, 477, 479, 530, 539, 546], "push": [473, 477, 488, 491, 541, 544, 549, 552], "put": [140, 211, 396], "pvc": 489, "py": [3, 133, 135, 151, 154, 180, 195, 228, 230, 231, 232, 281, 385, 443, 479, 483, 484, 489, 492, 495, 522, 526, 534, 538, 546, 550], "pycocotool": [230, 529], "pyhessian": 135, "pylanc": 522, "pyobject": 529, "pypi": 534, "pytest": 491, "python": [3, 60, 133, 138, 140, 161, 211, 226, 281, 332, 385, 396, 397, 443, 466, 468, 484, 489, 492, 494, 522, 526, 529, 534, 538, 544, 545, 553], "python3": 529, "pythonmultiheadattentionprun": 184, "pytorch": [163, 165, 166, 170, 173, 176, 179, 180, 183, 188, 194, 195, 208, 209, 211, 214, 218, 225, 234, 235, 244, 262, 272, 392, 394, 435, 436, 437, 438, 439, 440, 441, 442, 445, 446, 447, 448, 459, 460, 466, 473, 478, 489, 491, 492, 494, 495, 496, 497, 502, 521, 522, 523, 525, 527, 530, 531, 533, 534, 536, 538, 539, 540, 541, 544, 545, 549, 550, 552, 554], "pytorch_cpu": 497, "pytorch_fx": [211, 218, 538], "pytorch_ipex": [211, 218, 538], "pytorch_prun": 188, "pytorchalignimagechannel": 225, "pytorchbasemodel": 244, "pytorchbasepattern": [175, 189], "pytorchbaseprun": 180, "pytorchbasicprun": 181, "pytorchbertdataset": 209, "pytorchblockmaskprun": 182, "pytorchcifar10": 211, "pytorchcifar100": 211, "pytorchcriterion": 163, "pytorchcropresizetransform": 225, "pytorchcrossentropyloss": 163, "pytorchdataload": 206, "pytorchdataset": 211, "pytorchdynamo": 478, "pytorchfashionmnist": 211, "pytorchfilt": 218, "pytorchfxmodel": 244, "pytorchimagenetraw": 214, "pytorchintermediatelayersknowledgedistillationloss": 163, "pytorchintermediatelayersknowledgedistillationlosswrapp": 163, 
"pytorchknowledgedistillationloss": [163, 538], "pytorchknowledgedistillationlosswrapp": 163, "pytorchloss": 234, "pytorchmetr": 234, "pytorchmnist": 211, "pytorchmodel": 244, "pytorchmxnettransform": 225, "pytorchmxnetwrapdataset": 211, "pytorchmxnetwrapfunct": [211, 225], "pytorchnormalizetransform": 225, "pytorchoptim": 165, "pytorchpatternlockprun": 185, "pytorchpatternninm": 178, "pytorchpatternnxm": 179, "pytorchprogressiveprun": 186, "pytorchretrainfreeprun": 187, "pytorchselfknowledgedistillationloss": 163, "pytorchselfknowledgedistillationlosswrapp": 163, "pytorchsgd": 165, "pytorchtransform": 225, "pytorchtranspos": 225, "q": [30, 31, 92, 94, 116, 357, 359, 371, 471, 476, 478, 488, 549, 552], "q_conf": 525, "q_config": [28, 145, 292, 293, 294, 297, 298, 459, 495], "q_dataload": [198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 532, 554], "q_func": [198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 413, 495, 496, 538, 554], "q_hook": [198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274], "q_max": [488, 552], "q_min": [488, 552], "q_model": [135, 139, 195, 262, 301, 305, 417, 471, 475, 476, 480, 482, 489, 492, 523, 526, 528, 532, 537, 538, 540, 546, 547, 548, 549], "q_tensor": 433, "q_weight": [31, 423], "q_x": [488, 552], "qa": [231, 232, 494], "qactivationoper": 5, "qargmaxoper": 6, "qas_id": 225, "qat": [97, 144, 195, 243, 494, 525, 526, 531, 533, 538, 546], "qat_clone_funct": 101, "qat_op_name_dict": 195, "qattent": 7, "qattentionoper": 7, "qavgpool2d": 297, "qbinari": 8, "qbinaryoper": 8, "qconcat": 9, "qconcatoper": 9, "qconfig": [1, 145, 417, 465, 472, 477, 494, 549], "qconfig_file_path": 160, "qconv2d": 292, "qconvoper": 10, "qd": 476, "qdens": 293, "qdepthwiseconv2d": 294, "qdirect": 11, "qdirectoper": 11, "qdq": [29, 72, 97, 98, 127, 145, 195, 288, 344, 362, 433, 457, 458, 459, 480, 495, 528, 533, 546, 552], "qdq_enabl": [33, 306], "qdq_op_fp32_bia": 195, "qdq_op_fp32_bias_qdq": 195, "qdq_op_int32_bia": 195, "qdq_quantiz": 412, "qdq_tensor": 31, "qdq_weight_actor": 433, "qdq_weight_asym": 433, "qdq_weight_sym": 433, "qdqlayer": [398, 429], "qembedlayernorm": 12, "qembedlayernormalizationoper": 12, "qgather": 13, "qgatheroper": 13, "qgemm": 15, "qgemmoper": 15, "qglobalaveragepooloper": 14, "qintegerop": [533, 546], "qkv": 184, "qkv_modul": 184, "qkv_name": 184, "qlinear": [195, 211, 218, 234, 457, 495], "qlinear2qdq": 456, "qlinearaveragepool": 23, "qlinearconv": 10, "qlinearglobalaveragepool": 14, "qlinearmatmul": 18, "qlinearop": [457, 533, 546, 555], "qlora": [477, 488, 549], "qmatmuloper": 18, "qmax": 145, "qmaxpool": 19, "qmaxpool2d": 297, "qmaxpooloper": 19, "qmin": 145, "qmodel": [135, 479, 481], "qnli": [209, 537, 555], "qop_registri": 21, "qoper": [5, 21, 195, 528], "qpad": 22, "qpadoper": 22, "qpooloper": 23, "qqp": [209, 537, 555], "qresiz": 25, "qresizeoper": 25, "qscheme": 497, "qseparableconv2d": 298, "qsplit": 26, "qsplitoper": 26, "qsym_model": 1, "qt_config": [33, 306], "qtensor": [423, 424], "qtensor_to_tensor": 1, "qtensorconfig": 422, "qtensormetainfo": 426, "qtype": [29, 30, 495], "quadrat": [439, 477], "quala": 545, "qualiti": [227, 522], "quant": [31, 133, 145, 195, 278, 285, 287, 288, 289, 299, 303, 413, 416, 433, 438, 439, 477, 488, 489, 492, 522, 536, 538, 546], "quant_axi": [292, 293, 294, 297, 298], "quant_block_list": [418, 439, 448], "quant_config": [288, 289, 301, 305, 391, 392, 403, 405, 412, 416, 418, 419, 420, 427, 430, 432, 442, 448, 471, 473, 475, 476, 477, 478, 479, 480, 481, 484, 531, 541], 
"quant_dequant_data": 29, "quant_dequant_w_v1": 413, "quant_dequant_x_v1": 413, "quant_format": [195, 459, 528], "quant_level": [195, 549, 552, 554], "quant_lm_head": [420, 439, 452, 477], "quant_max": 497, "quant_min": 497, "quant_mod": [32, 278, 289, 292, 293, 294, 297, 298, 496, 497], "quant_mode_from_pattern": 278, "quant_narrow_rang": [292, 293, 294, 297, 298], "quant_opt": 280, "quant_round_mod": [292, 293, 294, 297, 298], "quant_scal": [439, 477], "quant_statu": [292, 293, 294, 297, 298], "quant_t": [292, 293, 294, 297, 298], "quant_tensor": [31, 433], "quant_typ": 280, "quant_vis": 448, "quant_weight_w_scal": 433, "quant_zero": [439, 477], "quantformat": 30, "quantif": [475, 477, 480, 549, 552], "quantil": [433, 554], "quantit": 477, "quantiz": [1, 3, 4, 29, 30, 31, 33, 34, 73, 77, 79, 81, 84, 92, 97, 98, 99, 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 134, 135, 137, 141, 142, 145, 149, 151, 153, 157, 159, 161, 162, 195, 198, 199, 221, 226, 267, 269, 271, 280, 283, 284, 286, 288, 289, 290, 291, 292, 293, 294, 295, 297, 298, 392, 395, 398, 402, 403, 404, 405, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 422, 423, 424, 425, 426, 429, 430, 431, 432, 433, 434, 435, 436, 448, 449, 452, 453, 459, 461, 462, 465, 466, 470, 474, 480, 482, 485, 489, 495, 501, 502, 522, 523, 526, 527, 530, 531, 532, 533, 537, 540, 543, 545, 550, 553, 554], "quantizable_nod": 1, "quantizable_op": [145, 413, 417], "quantizaiton_config": 489, "quantization_aware_training_conf": 538, "quantization_cfg": 495, "quantization_config": [451, 489], "quantization_param": 28, "quantizationawaretrainingcallback": 162, "quantizationawaretrainingconfig": [195, 449, 492, 525, 528, 538, 546, 548, 554], "quantizationmethod": 452, "quantizationmod": 30, "quantizationspec": 409, "quantize_4bit": 433, "quantize_config": [99, 496], "quantize_data": 30, "quantize_data_per_channel": 30, "quantize_data_with_scale_zero": 30, "quantize_elemwise_op": 404, "quantize_graph": [96, 361], "quantize_graph_bas": [97, 362], "quantize_graph_bn": [97, 362], "quantize_graph_common": [96, 361], "quantize_graph_concatv2": [97, 362], "quantize_graph_conv": [97, 362], "quantize_graph_for_intel_cpu": [97, 362], "quantize_graph_matmul": [97, 362], "quantize_graph_pool": [97, 362], "quantize_help": 99, "quantize_lay": 99, "quantize_layer_add": 102, "quantize_layer_bas": 102, "quantize_layer_bn": 102, "quantize_model": [305, 479, 480, 481, 488], "quantize_model_with_single_config": 305, "quantize_mx_op": 404, "quantize_nod": 459, "quantize_nparrai": 30, "quantize_per_channel": [488, 552], "quantize_per_tensor_absmax": [488, 552], "quantize_rang": 30, "quantize_recip": 101, "quantize_sym_model": 1, "quantize_wrapp": 99, "quantizeconfig": [100, 101], "quantized_data": 30, "quantized_dict": 135, "quantized_model": [477, 484, 531], "quantized_nod": [92, 357], "quantized_value_typ": 30, "quantizedconcatv2": [109, 119, 364, 374], "quantizedconv": [76, 77, 347, 348], "quantizeddeconv": [76, 347], "quantizediniti": 30, "quantizedinput": [221, 553], "quantizedmatmul": [78, 79, 349, 350], "quantizedmatmulwithbiasanddequant": [79, 350], "quantizedrnnconvert": 84, "quantizedvalu": 30, "quantizedvaluetyp": 30, "quantizegraph": [117, 372], "quantizegraphbas": [117, 372], "quantizegraphforintel": [121, 376], "quantizegraphhelp": [124, 379], "quantizelay": 105, "quantizelayeradd": 104, "quantizelayerbas": 105, "quantizelayerbatchnorm": 106, "quantizelinear": 30, "quantizenodebas": 
[117, 372], "quantizer_cl": 448, "quantizev2": [83, 354], "quantizewrapp": [101, 107], "quantizewrapperbas": 107, "quantiztaion": [195, 417], "quantopt": 280, "quanttyp": [30, 280], "queri": [1, 32, 133, 145, 173, 184, 195, 288, 289, 417, 448, 488, 496, 497, 546], "query_framework_cap": 496, "query_fused_pattern": 495, "query_fw_cap": [495, 497], "query_layer_nam": 184, "query_quantizable_nod": 1, "querybackendcap": [495, 496], "question": [225, 231, 232, 488, 490, 491, 494, 495, 534, 544, 552, 553, 555], "question_text": 225, "quick": [470, 484, 530, 539, 544], "quickli": [482, 536, 554], "quint8": [30, 413], "quit": 544, "qweight_config_path": 549, "qwen": 489, "qwen2": [494, 545], "r": [30, 128, 150, 380, 475, 476, 477, 479, 481, 483, 488, 534, 546, 555], "r1": [133, 385], "r34": 492, "race": 490, "rais": [52, 53, 55, 90, 138, 140, 145, 170, 176, 183, 192, 230, 235, 281, 324, 325, 327, 396, 397, 413, 433, 435, 448, 494, 522, 544, 545], "ram": [140, 396, 477, 484], "ran": 162, "rand": [418, 439, 477, 488, 552], "randn": [195, 528], "random": [41, 133, 161, 195, 221, 225, 266, 270, 313, 391, 418, 466, 477, 538, 544, 553], "random_crop": [221, 553], "random_flip_left_right": [221, 553], "random_se": [195, 266, 538], "randomcrop": 553, "randomcroptftransform": 225, "randomcroptransform": 225, "randomhorizontalflip": [225, 538, 553], "randomli": [225, 266, 488, 552, 553, 554], "randomresizedcrop": [538, 553], "randomresizedcropmxnettransform": 225, "randomresizedcroppytorchtransform": 225, "randomresizedcroptftransform": 225, "randomresizedcroptransform": 225, "randomst": 266, "randomtunestrategi": 273, "randomverticalflip": [225, 553], "rang": [212, 213, 225, 281, 387, 433, 449, 462, 471, 473, 474, 477, 478, 481, 483, 488, 494, 495, 497, 521, 525, 526, 538, 541, 543, 544, 546, 549, 552, 553], "rank": 526, "rapid": [534, 536], "rate": [477, 488, 538, 544, 546], "rather": [477, 532, 549], "ratio": [31, 128, 150, 175, 180, 192, 195, 221, 225, 380, 536, 538, 544, 553, 555], "ratiospars": 555, "raw": [30, 151, 210, 214, 217, 271, 412, 415, 431, 437, 537, 554], "raw_arrai": 444, "raw_cmd": [151, 154], "raw_func": 446, "raw_imag": 211, "rawgptquant": 420, "rb": [140, 396], "rcnn": 528, "re": 549, "reach": [153, 175, 190, 195, 491, 523, 544, 554], "read": [90, 133, 140, 214, 225, 385, 396, 548, 551], "read_graph": [133, 385], "read_squad_exampl": 225, "read_tensorflow_node_attr": 90, "readabl": [154, 522], "readi": 544, "readlin": [140, 396], "readm": 526, "real": [30, 212, 387, 488, 496, 521, 526, 545, 546], "realdiv": [51, 54, 323, 326], "realiz": [526, 542, 548, 551], "rearrang": [31, 187, 477, 544, 549], "reason": [477, 488, 490, 496, 523, 549, 554], "rebuild": [133, 385], "recal": [232, 234], "receiv": 551, "recent": [474, 538, 539], "recip": [33, 173, 195, 306, 473, 482, 494, 531, 538, 541, 547, 549, 552, 554], "recipe_sampl": 173, "recipesearch": 173, "recogn": [227, 448, 554], "recognit": [474, 526, 528, 544], "recognitionimagenet": 555, "recommend": [234, 446, 472, 474, 484, 489, 494, 495, 530, 531, 544, 554], "recommendation_system": 195, "reconstruct": [133, 385, 545], "reconstruct_saved_model": [133, 385], "record": [135, 145, 163, 166, 209, 210, 211, 217, 433, 466, 482, 549, 551, 554], "record_max_info": [303, 413], "record_output": 166, "recov": [29, 411, 413, 433, 465, 466, 495, 525], "recover_config": 34, "recover_forward": 433, "recover_model_from_json": [411, 465], "rectangl": [477, 547], "recurs": [194, 466], "recursivescriptmodul": 415, "redpajama": [475, 552], "reduc": [16, 
195, 439, 474, 475, 476, 477, 480, 481, 488, 489, 495, 521, 525, 538, 539, 544, 545, 546, 547, 548, 549, 552, 554], "reduce_rang": [2, 28, 29, 195, 497], "reducemax": 24, "reducemin": 24, "reduceminmaxoper": 24, "reduceoper": 24, "reduct": [544, 547], "redund": [76, 78, 347, 349, 448], "ref": [3, 133, 385], "refer": [29, 135, 149, 169, 177, 178, 179, 187, 188, 192, 195, 209, 227, 228, 234, 262, 281, 413, 420, 439, 472, 474, 475, 476, 478, 479, 480, 481, 489, 492, 494, 495, 496, 520, 523, 525, 526, 528, 532, 534, 535, 537, 538, 539, 542, 547, 550, 553, 554], "reference_corpu": 228, "refin": [443, 481, 532, 544], "reflect": [537, 553], "reg": [170, 181, 182, 187, 195], "reg_term": 189, "regard": [490, 548], "regardless": 490, "region": [475, 552], "regist": [3, 21, 138, 140, 146, 152, 163, 165, 169, 170, 175, 176, 180, 183, 188, 189, 190, 191, 211, 218, 225, 234, 245, 274, 280, 299, 391, 396, 397, 398, 413, 439, 443, 444, 448, 495, 522, 526, 537, 538, 542, 552, 554], "register_acceler": 443, "register_algo": [391, 448, 522], "register_autotun": 413, "register_config": [152, 522], "register_criterion": [169, 191], "register_customer_metr": 234, "register_pack_func": 444, "register_packag": [140, 396], "register_pattern": 175, "register_prun": [180, 188], "register_reg": 189, "register_schedul": 190, "register_supported_configs_for_fwk": 152, "register_weight_hook": 398, "registr": [146, 211, 218, 225], "registri": [152, 169, 175, 180, 188, 189, 190, 191, 443], "registry_criterion": 163, "regress": [209, 491], "regul": [181, 182, 187], "regular": [189, 227], "regulariz": 189, "reinstal": 529, "reject": 490, "rel": [195, 245, 538, 554, 555], "relat": [174, 189, 230, 280, 407, 410, 414, 424, 447, 455, 497, 531, 544, 549], "relationship": 195, "relative_loss": 153, "releas": [471, 476, 494, 531, 534, 556], "relev": [266, 496, 497, 522, 544], "reli": [478, 538, 552], "religion": 490, "reload": 160, "relu": [59, 63, 94, 331, 335, 359, 492, 530, 552], "relu6": [59, 331, 530], "remain": [431, 441], "remaind": 521, "remap": [55, 140, 327, 396], "remov": [5, 30, 36, 42, 45, 59, 65, 68, 69, 70, 81, 83, 125, 171, 184, 232, 283, 308, 314, 317, 331, 337, 340, 341, 342, 352, 354, 448, 490, 538, 544], "removableactivationoper": 5, "remove_init_from_model_input": 30, "remove_training_nod": [61, 333], "removetrainingnodesoptim": [65, 337], "renam": [66, 338, 478, 550], "rename_batch_norm": [61, 333], "renamebatchnormoptim": [66, 338], "repeat": 554, "repercuss": 490, "replac": [3, 141, 145, 232, 398, 406, 413, 427, 433, 448, 471, 473, 476, 489, 522, 526, 538, 541, 550], "replace_forward": 433, "replace_pattern": 406, "replacement_fn": 427, "replic": 554, "replica": 554, "repo": [209, 469, 491, 527, 556], "repo_id": [141, 398, 448], "repo_typ": [141, 398, 448], "report": [490, 491, 494, 551], "repositori": 491, "repr": 452, "repres": [30, 152, 153, 156, 157, 175, 179, 180, 188, 189, 192, 211, 216, 230, 232, 234, 406, 426, 476, 477, 481, 488, 490, 497, 521, 528, 544, 546, 547, 554], "represent": [30, 138, 397, 473, 477, 488, 490, 492, 521, 525, 541, 544, 546], "reproduc": 418, "requant": [77, 79, 348, 350], "requantize_cfg": 145, "request": [476, 494, 546], "requir": [145, 195, 201, 243, 261, 390, 433, 472, 474, 476, 477, 481, 488, 489, 492, 495, 496, 520, 521, 523, 525, 526, 529, 530, 538, 539, 540, 544, 546, 549, 550, 551, 552, 554], "requirements_pt": [529, 534], "requirements_tf": 534, "rerang": [132, 384], "rerange_quant": [132, 384], "rerange_quantized_concat": [130, 382], "rerangequantizedconcat": [132, 
384], "rerewrit": [36, 308], "rerun": 489, "resblock": 195, "rescal": [221, 225, 553], "rescalekeraspretraintransform": 225, "rescaletftransform": 225, "rescaletransform": 225, "research": [478, 494, 535, 552], "reserv": [198, 199], "reset": 537, "reset_none_to_default": 192, "reshap": [46, 52, 53, 58, 318, 324, 325, 330, 413, 488, 552], "reshape_in0_ndef": [52, 53, 324, 325], "reshape_in1_ndef": [52, 53, 324, 325], "reshape_in_channel_to_last": 413, "reshape_scale_as_input": 413, "reshape_scale_as_weight": 413, "reshuffl": 523, "resid": [140, 396], "resiz": [16, 209, 210, 221, 225, 553], "resize_method": 221, "resize_shap": 216, "resize_sid": [221, 553], "resizecropimagenet": [526, 553], "resizemxnettransform": 225, "resizeoper": 25, "resizepytorchtransform": 225, "resizetftransform": 225, "resizetransform": 225, "resizewithaspectratio": 221, "resizewithratio": [225, 553], "resnest50": 555, "resnet": [526, 555], "resnet101": 555, "resnet18": [472, 494, 539, 555], "resnet34": 492, "resnet50": [195, 479, 494, 526, 528, 539, 544, 546, 551, 555], "resnet50_fp32_pretrained_model": 526, "resnet50_v1": [526, 538], "resnetv2": 555, "resnext101_32x8d": 555, "resolut": 521, "resolv": [492, 493], "resort": 538, "resourc": 554, "respect": [488, 490, 530, 544, 552], "respons": [249, 250, 253, 479, 546], "rest": [145, 433], "restor": [477, 549], "restrict": [55, 140, 195, 327, 396, 544, 551], "restructuredtext": 281, "result": [30, 133, 135, 145, 221, 225, 227, 230, 231, 232, 234, 262, 269, 385, 406, 408, 417, 449, 452, 465, 466, 470, 471, 472, 475, 477, 480, 482, 483, 488, 490, 494, 523, 526, 533, 535, 537, 538, 542, 544, 545, 546, 549, 551, 552, 553, 554, 555], "resum": [195, 198, 199, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274], "resume_from": [161, 195, 466], "retain": 521, "retrac": 548, "retrain": [187, 476, 525], "retrain_fre": [169, 183, 544], "retrainfreecriterion": 169, "retrainfreeprun": [187, 188], "retri": 413, "retriev": [152, 159, 207, 387, 406, 413, 448, 544], "return": [1, 3, 31, 39, 52, 53, 55, 59, 90, 101, 133, 134, 135, 140, 144, 145, 146, 151, 152, 154, 160, 161, 163, 165, 170, 173, 175, 176, 180, 183, 188, 189, 190, 192, 194, 198, 199, 211, 218, 221, 225, 227, 228, 230, 231, 232, 234, 235, 239, 243, 245, 257, 261, 262, 266, 274, 278, 280, 281, 299, 301, 303, 305, 311, 324, 325, 327, 331, 385, 390, 391, 396, 398, 404, 406, 408, 409, 411, 412, 413, 415, 417, 418, 420, 425, 427, 431, 433, 435, 437, 438, 439, 441, 442, 446, 448, 449, 452, 455, 459, 465, 466, 474, 478, 479, 482, 488, 492, 496, 497, 523, 526, 537, 540, 542, 544, 546, 549, 552, 554], "return_int": [433, 549], "return_tensor": 489, "reus": 478, "revers": [133, 154], "reversed_numa_info": 154, "revert": [272, 280, 554], "reverted_data_typ": 280, "review": [490, 491, 494, 545], "revis": [141, 398, 431, 441, 448], "rewrit": [35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 91, 92, 93, 94, 170, 307, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 406], "rgb": 221, "right": [140, 221, 230, 396, 488, 490, 491, 492, 552, 553], "rmax": [30, 481, 488, 546], "rmin": [30, 473, 481, 488, 541, 546], "rmse": [234, 537, 554], "rnn": 84, "rnn_convert": 80, "rnn_detail": 84, 
"roberta": [209, 555], "roc": 234, "role": [236, 390, 477, 488, 549, 552], "rollov": [200, 202, 204, 208, 387, 523], "romanian": 544, "root": [173, 209, 210, 211, 214, 234, 526, 537, 538], "root_linear": 174, "root_rank": 526, "roughli": [477, 488, 549], "rouhani": [473, 541], "round": [31, 128, 150, 380, 404, 439, 471, 472, 477, 478, 488, 494, 545, 546, 549, 552], "round_": [488, 552], "round_method": 439, "round_multipli": 171, "roundingmod": 404, "row": [466, 488, 552], "rowi": 31, "rte": [209, 537, 555], "rtn": [31, 145, 303, 392, 428, 437, 438, 439, 478, 484, 488, 489, 522, 547, 549], "rtn_algo_entri": 522, "rtn_arg": [477, 547, 549], "rtn_entri": 437, "rtn_g32asym": 549, "rtn_quantiz": 31, "rtnconfig": [437, 438, 439, 448, 452, 477, 478, 482, 489, 531], "rtnquantiz": 430, "rule": [481, 544], "run": [1, 125, 133, 140, 151, 180, 192, 195, 198, 199, 208, 224, 235, 262, 266, 283, 385, 396, 413, 448, 449, 472, 474, 477, 478, 481, 483, 484, 488, 494, 495, 520, 526, 529, 530, 538, 545, 546, 549, 550, 551, 554], "run_arg": [438, 442, 478], "run_cmd": 554, "run_fn": [412, 413, 438, 442, 475, 476, 477, 478], "run_fn_for_vlm_autoround": 448, "run_forward": 1, "run_generation_gpu_woq": 489, "run_inst": 151, "run_multi_instance_command": 154, "runtim": [471, 488, 491, 494, 495, 496, 498, 523, 527, 530, 533, 539, 541, 549, 554], "s1": 554, "s2": 554, "s3": 554, "s4": 554, "s5": 554, "s6": 554, "s7": 554, "s8": [292, 293, 294, 297, 298, 459], "sa_optim": 167, "sacrif": [473, 541, 545], "safe": [431, 491], "safe_seri": 431, "safetensor": 494, "salient": [31, 477, 544, 549], "same": [68, 126, 133, 153, 187, 195, 209, 225, 230, 286, 340, 385, 413, 466, 472, 473, 477, 482, 483, 488, 495, 497, 520, 523, 526, 530, 538, 541, 544, 546, 549, 552, 553, 554], "sampl": [31, 62, 145, 153, 195, 203, 207, 209, 210, 211, 221, 225, 234, 266, 334, 387, 413, 418, 448, 477, 481, 482, 483, 488, 523, 527, 533, 546, 549, 552, 553, 554, 555], "sampler": [153, 200, 202, 204, 277, 387, 418, 439, 477, 482, 523, 526], "sampling_s": 538, "samsum": 555, "sapphir": [534, 536], "satisfi": [208, 534], "satur": 553, "save": [90, 133, 139, 140, 160, 170, 195, 211, 243, 262, 385, 390, 396, 398, 408, 411, 415, 431, 442, 449, 453, 458, 459, 462, 466, 472, 478, 479, 488, 489, 492, 494, 495, 532, 538, 539, 540, 543, 546, 547, 549, 552], "save_config_map": 160, "save_dir": 489, "save_for_huggingface_upstream": [462, 538], "save_load": [158, 407, 410, 414, 428], "save_path": [458, 459, 495, 540], "save_pretrain": 489, "save_protobuf": 90, "save_q_input": 413, "save_to_disk": 495, "saved_dir": [462, 489, 549], "saved_model": [133, 385, 481, 547], "saved_model_sess": [243, 390], "saved_model_tag": [243, 390], "saved_path": 398, "saved_result": [408, 415, 431, 441, 477, 478, 540, 549], "savedmodel": [235, 243, 262, 390], "scalabl": [474, 481, 488, 494, 534, 536, 539, 545, 546, 548], "scalar": [198, 199, 235, 262, 449, 537, 546, 554], "scale": [3, 30, 31, 85, 126, 128, 142, 145, 149, 150, 221, 225, 286, 292, 293, 294, 297, 298, 355, 380, 413, 422, 425, 426, 429, 433, 471, 472, 473, 475, 476, 477, 481, 488, 541, 546, 549, 551, 552, 553, 554], "scale_bit": 404, "scale_c": [128, 150, 380], "scale_dtyp": [418, 429, 439, 452, 462, 477, 549], "scale_format": 439, "scale_info": 466, "scale_method": [439, 472], "scale_nam": 30, "scale_param": 439, "scale_propag": [80, 351], "scale_quant_group_s": [439, 477], "scale_shar": [413, 439], "scale_valu": 30, "scalepropagationtransform": [85, 355], "scaler": 285, "scales_per_op": [126, 286, 303], "scan": 
[491, 494], "scenario": [477, 478, 488, 495, 520, 538, 544, 549], "scene": 544, "schedul": [170, 180, 181, 182, 187, 189, 195, 477, 538, 543], "schema": [89, 433], "scheme": [29, 30, 31, 142, 145, 413, 417, 429, 433, 495, 496, 497, 523, 530, 549, 554], "scienc": 534, "scipi": 266, "scope": [133, 385, 478, 491, 496, 538], "score": [169, 175, 180, 184, 191, 195, 227, 228, 230, 231, 232, 234, 269, 455, 492, 537, 542, 544, 551, 554], "script": [154, 173, 227, 228, 231, 232, 489, 491, 494, 526, 533, 545], "seamless": 489, "seamlessli": [473, 541, 544, 554], "search": [133, 173, 187, 195, 198, 199, 227, 266, 406, 420, 433, 466, 470, 477, 478, 480, 494, 522, 533, 537, 544, 545, 549, 552, 554], "search_algorithm": 195, "search_clip": 433, "search_pattern": 406, "search_spac": 195, "searcher": [173, 195], "searching_result": 173, "sec": [483, 555], "second": [49, 140, 174, 195, 209, 267, 321, 396, 466, 477, 484, 488, 489, 538, 549, 552, 554], "section": [281, 497, 522, 525, 544, 549], "secur": [138, 397, 494, 545, 556], "sed": 550, "see": [138, 225, 227, 397, 472, 477, 488, 490, 491, 492, 493, 522, 535, 538, 549, 552, 554], "seed": [133, 161, 195, 391, 418, 439, 466, 477, 538], "seek": [140, 396, 549], "seem": 522, "seen": [466, 495], "segment": [209, 228, 230, 491], "segment_id": [225, 538], "select": [189, 192, 195, 209, 433, 437, 443, 448, 462, 472, 477, 488, 492, 521, 533, 534, 544, 546, 549, 554], "self": [48, 145, 173, 195, 245, 320, 433, 478, 495, 496, 523, 533, 537, 551, 554], "selfknowledg": 163, "selfknowledgedistillationloss": 163, "selfknowledgedistillationlossconfig": [195, 538], "selfmhasearch": 173, "semant": [495, 530], "send": [491, 496], "senior": 521, "sensit": [169, 533, 544], "sensitivitybalanc": 555, "sentenc": 522, "sep": [494, 545], "separ": [298, 418, 490, 522, 532, 534, 535, 543], "separable_conv2d": 295, "separableconv2d": 298, "seq_len": 452, "seq_length": 209, "seqlen": [418, 439, 448, 477], "seqtyp": 90, "sequenc": [48, 90, 174, 177, 178, 195, 209, 224, 225, 228, 232, 320, 418, 448, 477, 488, 495, 530, 544, 549, 553], "sequencediagram": [496, 497], "sequenti": [153, 207, 263, 265, 387, 391, 420, 482, 554], "sequentialsampl": [153, 207, 387], "seri": [221, 494, 534, 538, 545, 553], "serial": [138, 140, 396, 397, 431], "serv": [195, 243, 390, 488], "server": [161, 484], "servic": [494, 545], "sess": [133, 243, 385, 390], "session": [31, 133, 208, 243, 385, 390, 538], "sessionopt": 257, "set": [1, 30, 36, 81, 90, 100, 133, 140, 145, 151, 152, 153, 154, 161, 165, 192, 195, 198, 199, 200, 209, 211, 214, 225, 230, 234, 235, 243, 261, 262, 281, 288, 302, 308, 352, 385, 387, 389, 390, 396, 406, 409, 413, 431, 433, 438, 439, 441, 448, 449, 466, 472, 474, 475, 477, 478, 479, 480, 482, 483, 484, 488, 489, 490, 494, 496, 520, 523, 526, 528, 529, 530, 532, 533, 537, 538, 539, 544, 546, 549, 551, 552, 554], "set_all_env_var": 151, "set_cores_for_inst": 154, "set_eager_execut": 261, "set_env_var": 151, "set_epoch": 526, "set_loc": [471, 475, 476, 477, 478, 479], "set_modul": [145, 413, 433, 448], "set_nam": 90, "set_random_se": [161, 195, 466], "set_resume_from": [161, 195, 466], "set_tensor": 495, "set_tensorboard": [161, 195, 466], "set_workspac": [161, 195, 466], "settings_recommend": 522, "setup": [494, 530, 534, 544], "sever": [124, 190, 225, 379, 473, 474, 484, 488, 525, 533, 538, 539, 541, 544, 547, 552, 553, 554], "sex": [466, 490], "sexual": 490, "sf": 529, "sgd": [165, 195, 538], "shaji": 477, "shape": [30, 31, 90, 133, 175, 212, 213, 221, 225, 230, 385, 387, 420, 426, 435, 
477, 481, 488, 530, 538, 549, 552, 553], "shape_overrid": 127, "shard": [141, 398, 431], "share": [30, 31, 67, 83, 94, 126, 286, 339, 354, 359, 433, 477, 488, 529, 530, 549, 552, 554], "share_qdq_y_pattern": [91, 356], "shared_criterion": [413, 439, 552], "shareqdqforitexypatternoptim": [94, 359], "shell": [154, 494], "shen": [535, 544], "shift": [128, 150, 221, 380, 553], "shop": 522, "short": 554, "shortcut": 209, "shorter": [209, 225, 553], "shot": [169, 187, 190, 477, 481, 533, 538, 544, 545, 549], "should": [40, 100, 101, 140, 162, 171, 173, 184, 195, 198, 199, 207, 209, 211, 225, 228, 234, 235, 262, 281, 301, 305, 312, 396, 413, 427, 431, 441, 449, 459, 466, 476, 478, 479, 480, 481, 483, 488, 489, 492, 495, 496, 497, 526, 537, 538, 542, 544, 546, 549, 553, 554], "show": [180, 466, 473, 488, 490, 526, 538, 541, 552, 554], "show_memory_info": 466, "show_nam": [131, 383], "show_op": [131, 383], "shown": [472, 473, 475, 488, 523, 537, 538, 541, 542, 543, 544, 547, 552, 554], "shrink": 544, "shuffl": [200, 202, 204, 387, 523, 538, 546], "shufflenet": 555, "side": [225, 494, 553], "sigmoid": 528, "sign": [280, 439, 477, 488, 491, 494, 497, 521, 545, 546, 549, 551], "signatur": [243, 390], "signed_flag": 280, "signifi": 90, "signific": [481, 488, 544, 545, 547, 552], "significantli": [474, 477, 538, 539, 544, 554], "signround": 477, "sigopt": [197, 545], "sigopt_api_token": [538, 551, 554], "sigopt_experiment_id": 551, "sigopt_experiment_nam": [538, 551, 554], "sigopt_project_id": [538, 551, 554], "sigopttunestrategi": 198, "silicon": [473, 541], "similar": [184, 488, 538, 552, 554], "similarli": [488, 552], "simpl": [156, 225, 472, 481, 488, 538, 544, 551, 552, 554], "simple_attr": 156, "simple_infer": [145, 417], "simple_progress_bar": 30, "simplest": 521, "simpli": [526, 540], "simplic": [488, 552], "simplifi": [544, 545], "simul": [168, 478, 497], "simultan": [184, 543], "sinc": [195, 227, 472, 477, 488, 528, 537, 543, 549], "since_vers": 89, "singl": [169, 190, 195, 203, 209, 225, 230, 234, 262, 305, 387, 406, 413, 477, 492, 494, 534, 544, 547, 552], "single_output": 234, "singleton": [95, 161, 360, 391, 466], "site": 550, "situat": [195, 478, 526, 544], "sixteen": [474, 539], "size": [83, 153, 177, 178, 202, 203, 208, 211, 217, 221, 225, 243, 245, 354, 387, 390, 418, 426, 431, 433, 466, 473, 476, 477, 488, 490, 496, 523, 526, 529, 538, 541, 542, 544, 546, 547, 549, 552, 553, 555], "skip": [140, 396, 489, 554], "skip_convers": 88, "skip_first": 277, "skip_special_token": 489, "skip_verified_config": 153, "skylak": 534, "slave": [280, 554], "slice": 90, "slim": [171, 172, 173, 230, 241, 243, 390, 391, 540, 544], "slim_sess": [243, 390], "slower": [477, 549], "small": [3, 52, 53, 54, 55, 269, 324, 325, 326, 327, 477, 488, 544, 549, 554, 555], "smaller": [55, 225, 327, 473, 538, 541, 544, 553], "smbo": 554, "smooth": [3, 125, 126, 133, 149, 195, 283, 284, 285, 286, 301, 303, 412, 413, 437, 439, 478, 494, 522, 531, 544, 554], "smooth_distribut": 3, "smooth_quant": [4, 139, 145, 148, 195, 394, 417, 522, 546, 552], "smooth_quant_arg": [195, 546, 552], "smooth_quant_calibr": 96, "smooth_quant_config": 301, "smooth_quant_en": [145, 417], "smooth_quant_entri": [301, 437, 522], "smooth_quant_scal": 96, "smoother": 282, "smoothquant": [29, 145, 149, 284, 410, 412, 413, 439, 470, 475, 477, 478, 480, 488, 536, 545, 549], "smoothquant_scale_info": [145, 413], "smoothquantcalibr": [125, 283], "smoothquantcalibrationllm": [125, 283], "smoothquantconfig": [284, 301, 303, 437, 439, 475, 480, 481], 
"smoothquantquant": 412, "smoothquantsampl": 277, "smoothquantscal": [126, 286], "smoothquantscalerllm": [126, 286], "smt": 227, "snapshot": 465, "snip": [169, 195, 533, 544, 555], "snip_momentum": [169, 195, 538, 544], "snip_momentum_progress": 195, "snip_progress": 195, "snipcriterion": 169, "snipmomentumcriterion": 169, "snippet": [522, 533], "so": [31, 90, 153, 225, 280, 442, 470, 472, 473, 477, 478, 481, 488, 489, 492, 494, 495, 529, 533, 539, 541, 546, 548, 549, 552, 553], "social": [490, 545], "socio": 490, "socket": [151, 154, 466, 555], "soft": 195, "softwar": [494, 535, 544, 545, 550], "solut": [128, 150, 380, 475, 477, 480, 488, 494, 495, 496, 529, 545, 549, 550, 552, 554], "solv": [488, 491, 546, 550], "some": [171, 173, 195, 209, 477, 478, 488, 494, 496, 497, 522, 528, 531, 534, 537, 538, 542, 544, 546, 548, 549, 552, 554], "someth": [198, 199, 211, 262, 449], "sometim": [90, 488, 492, 546], "somewhat": 496, "soon": 536, "sort": [195, 271, 477, 549, 554], "sota": 545, "sound": 534, "sourc": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 101, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149, 150, 151, 152, 153, 154, 156, 157, 159, 160, 161, 162, 163, 165, 166, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 226, 227, 228, 230, 231, 232, 234, 235, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 387, 389, 390, 391, 392, 396, 397, 398, 399, 401, 403, 404, 405, 406, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 421, 422, 423, 425, 426, 427, 429, 430, 431, 432, 433, 435, 437, 438, 439, 441, 442, 443, 444, 445, 446, 448, 449, 452, 453, 455, 457, 458, 459, 461, 462, 463, 464, 465, 466, 467, 468, 492, 494, 535, 538, 545], "space": [151, 181, 182, 187, 195, 198, 199, 232, 235, 262, 266, 271, 278, 478, 482, 488, 490, 491, 495, 496, 497, 522, 544, 546, 552], "spacetobatchnd": [43, 315], "spanbert": 555, "spars": [171, 185, 545], "sparse_dummy_v2": 213, "sparse_gpt": 544, "sparse_ratio": 213, "sparsecategoricalcrossentropyloss": 163, "sparsedummydataset": 213, "sparsegpt": [188, 544], "sparsegptprun": 188, "sparsiti": [175, 177, 178, 180, 181, 182, 185, 187, 190, 192, 195, 466, 470, 477, 494, 533, 538, 555], "sparsity_decay_typ": [195, 538, 544], "speak": [477, 488, 549], "special": [101, 140, 173, 245, 396, 478, 492, 523, 537, 538, 542, 544, 549], "specif": 
[29, 89, 101, 151, 152, 154, 165, 192, 195, 209, 210, 211, 212, 216, 218, 234, 262, 387, 389, 409, 413, 431, 448, 473, 474, 476, 477, 478, 481, 484, 490, 495, 496, 521, 522, 523, 530, 532, 536, 537, 538, 539, 540, 541, 542, 544, 549, 554], "specifi": [95, 140, 145, 151, 156, 192, 195, 198, 199, 225, 230, 234, 235, 262, 281, 360, 396, 404, 408, 413, 417, 418, 439, 441, 448, 449, 452, 465, 466, 478, 484, 488, 489, 496, 497, 526, 530, 537, 538, 542, 544, 549, 553, 554], "speed": [472, 488, 521, 538, 544, 545, 546, 554], "speedup": [488, 527, 533, 546], "spellcheck": 491, "spent": 554, "spevif": 433, "spiq": [29, 149, 413, 488, 552], "split": [16, 30, 67, 224, 225, 339, 418, 488, 492, 547, 549, 552, 553], "split_shared_bia": 30, "split_shared_input": [61, 333], "splitoper": 26, "splitsharedinputoptim": [67, 339], "spot": [475, 552], "spr": [32, 289], "sprase": 171, "sq": [284, 412, 481, 536, 552], "sq_config": 480, "sq_weight_tensor": 133, "sq_weights_nod": 133, "sqlalchemi": 529, "sqlinearwrapp": 413, "sqrt": [54, 326], "squad": [209, 225, 231, 232, 234, 537, 544, 553, 555], "squadexampl": 225, "squadf1": [234, 537], "squadv1": [225, 553], "squar": [234, 271, 472, 477, 537, 554], "squeez": [63, 335, 492], "squeezenet": 555, "squeezer": 174, "squential": 207, "src": 90, "ssd": [492, 528, 555], "ssd_mobilenet_v1": 538, "ssh": 526, "sst": [544, 555], "st": [209, 537], "stabil": [477, 549], "stabilityai": [475, 552], "stabl": [494, 544, 545], "stablelm": [475, 552], "stack": [420, 494], "stage": [152, 161, 263, 265, 272, 544, 554], "stai": [475, 477, 478, 489, 549, 552], "stand": [544, 554], "stand_norm": [212, 213, 387], "standard": [211, 225, 491, 496, 522, 528, 537, 553, 554], "star": 491, "start": [154, 195, 225, 230, 267, 470, 534, 545, 553, 554, 556], "start_epoch": 538, "start_posit": 225, "start_step": [180, 195, 538, 544], "stat": [154, 448], "state": [135, 174, 477, 484, 497, 544], "state_dict": [135, 398, 477, 526, 549], "statement": 522, "static": [28, 29, 32, 149, 173, 195, 262, 278, 287, 288, 289, 299, 301, 303, 413, 416, 417, 437, 439, 442, 459, 478, 494, 495, 496, 497, 528, 531, 533, 538, 547, 552, 554, 555], "static_config": 480, "static_graph": 173, "static_group": [439, 452, 477, 549], "static_qu": [282, 394, 479, 522], "static_quant_entri": [301, 437], "static_quant_export": 459, "staticmethod": [124, 379], "staticqu": [414, 416], "staticquantconfig": [288, 289, 299, 303, 391, 437, 439, 476, 479, 480, 481, 482], "staticquantquant": 416, "statist": [161, 466, 467, 472], "statu": [173, 478, 483, 490, 523, 541, 542, 544], "std": [221, 225, 538, 553], "std_valu": 221, "stderr": [391, 466], "step": [169, 179, 180, 181, 182, 186, 187, 190, 195, 449, 471, 475, 476, 477, 488, 489, 496, 497, 525, 526, 538, 543, 544, 548, 549, 551, 552, 554], "step1": 278, "step2": 278, "step3": 278, "step4": 278, "step5": 278, "step_siz": 552, "still": [478, 480, 488, 521, 538, 545, 546, 548], "stock": [32, 289, 552], "stop": [153, 195, 448, 470, 482, 538, 554], "stopgradi": [65, 337], "storag": [140, 396, 477, 488, 537, 549, 552], "store": [125, 169, 170, 173, 180, 181, 182, 183, 184, 185, 186, 187, 189, 191, 195, 211, 283, 408, 413, 532], "str": [1, 29, 30, 31, 125, 135, 140, 141, 143, 144, 145, 146, 152, 154, 156, 160, 161, 163, 165, 170, 173, 194, 195, 209, 210, 211, 218, 221, 225, 227, 228, 230, 232, 234, 251, 256, 258, 260, 261, 277, 280, 281, 283, 299, 302, 303, 305, 391, 396, 398, 399, 404, 406, 408, 409, 413, 415, 417, 418, 425, 427, 431, 433, 435, 437, 439, 441, 442, 443, 446, 448, 452, 455, 
458, 459, 462, 466, 467, 477, 478, 481, 522, 523, 537, 553], "str2arrai": 466, "str_label": 537, "straightforward": [477, 481, 488, 544, 549, 552], "strategi": [1, 145, 187, 195, 196, 211, 226, 470, 482, 488, 494, 495, 496, 497, 501, 521, 522, 523, 526, 533, 538, 545, 546, 548, 549, 550], "strategy_kwarg": [195, 551, 554], "strategy_registri": [274, 554], "stream": [391, 466], "streamlin": [534, 545], "stretch": 472, "strftime": 195, "strict": 452, "stride": [90, 225, 292, 294, 297, 298, 553], "string": [30, 101, 133, 138, 140, 151, 161, 163, 165, 173, 175, 180, 188, 189, 190, 192, 195, 211, 227, 230, 239, 243, 385, 390, 391, 396, 397, 420, 448, 452, 455, 466, 472, 481, 537, 553], "strip": [52, 53, 55, 68, 69, 133, 324, 325, 327, 340, 341, 385], "strip_equivalent_nod": [61, 133, 333, 385], "strip_unused_lib": [133, 385], "strip_unused_nod": [61, 133, 333, 385], "stripequivalentnodesoptim": [68, 340], "stripunusednodesoptim": [69, 341], "stroke": 554, "structur": [174, 185, 186, 195, 279, 420, 466, 470, 478, 488, 533, 544, 545, 546, 555], "stsb": 555, "student": [162, 166, 195, 209, 525, 555], "student1_layer_name1": 195, "student1_layer_name2": 195, "student2_layer_name1": 195, "student2_layer_name2": 195, "student_layer_nam": 195, "student_layer_output_process": 195, "student_loss": 525, "student_model": [163, 195, 538], "student_output": 525, "style": [203, 216, 281, 522, 532], "style_fold": 216, "style_transfer_dataset": 215, "styletransferdataset": 216, "sub": [51, 145, 151, 173, 323, 417, 522, 554], "sub_class": [234, 262], "sub_modul": 522, "subclass": [3, 21, 146, 165, 175, 180, 188, 190, 204, 208, 211, 218, 225, 234, 245, 274, 413, 495], "subfold": 211, "subgraph": [40, 51, 90, 144, 312, 323, 547, 554], "subgraphstyl": 554, "subject": 535, "submit": [491, 545], "subpixel": 221, "subprocess": [154, 522, 523], "subsect": [488, 552], "subsequ": [184, 496], "subset": [211, 214], "subsidiari": 535, "substanti": 547, "substitut": [301, 305, 481], "success": 534, "successfulli": [492, 494, 526, 545], "successor": [76, 77, 78, 79, 347, 348, 349, 350], "sudo": 529, "suffici": 551, "suffix": [133, 385], "suggest": [478, 492, 522, 551], "suit": 534, "suitabl": 478, "sum": [195, 234], "summar": [131, 383, 522], "summari": [151, 154, 449, 555], "summary_benchmark": 151, "summary_latency_throughput": 154, "super": 232, "supplement": 481, "suppli": 1, "support": [1, 29, 30, 31, 82, 89, 149, 152, 163, 165, 170, 176, 183, 187, 195, 198, 199, 208, 209, 211, 218, 221, 225, 226, 234, 235, 239, 245, 262, 272, 278, 281, 353, 413, 437, 448, 449, 455, 461, 462, 466, 468, 470, 471, 476, 479, 482, 488, 494, 496, 522, 531, 536, 538, 545, 548, 551, 554], "supported_lay": [145, 420, 433], "supported_op_typ": [48, 320], "supported_typ": 448, "suppos": [488, 552], "suppress": [477, 488, 549, 552], "sure": [175, 180, 188, 190, 494, 520], "surg": [140, 396], "surrog": 554, "suyu": 535, "sweet": [475, 552], "switch": [70, 342], "switch_optim": [61, 333], "switchoptim": [70, 342], "sy": [391, 452, 466, 522, 555], "sym": [29, 30, 31, 409, 413, 433, 452, 496, 497, 530, 546, 549], "sym_full_rang": 549, "sym_model": 1, "symbol": [1, 30, 144, 145, 227, 235, 262, 492, 540], "symbol_r": 227, "symbolblock": 1, "symbolic_trac": 136, "symmetr": [98, 409, 413, 462, 481, 497, 546, 549, 553, 554], "symnet": 1, "synchron": [446, 554], "syntax": 530, "sys_nic": 494, "system": [135, 140, 151, 154, 396, 466, 473, 474, 484, 489, 520, 541, 544, 555], "systemat": [475, 480, 552], "szymon": 521, "t": [30, 52, 53, 55, 128, 140, 150, 175, 
195, 225, 232, 324, 325, 327, 380, 396, 474, 475, 476, 481, 483, 492, 495, 522, 526, 538, 539, 544, 546, 549, 554], "t10k": 211, "t5": 544, "t5norm": 552, "tab": 232, "tabl": [173, 184, 466, 473, 494, 497, 528, 531, 541, 555], "table_entri": 466, "taco": [494, 545], "tag": [140, 243, 390, 396, 488, 552], "tail": 477, "tailor": [477, 484], "take": [3, 175, 195, 198, 199, 203, 225, 234, 235, 262, 266, 387, 449, 474, 477, 481, 484, 490, 492, 494, 523, 530, 538, 544, 546, 548, 549, 553, 554], "taken": [198, 199, 235, 262, 449, 546], "tamper": [140, 396], "tar": 211, "target": [1, 29, 30, 87, 145, 173, 190, 192, 195, 266, 288, 406, 420, 448, 462, 472, 478, 526, 538, 544, 553, 554], "target_boxes_num": 537, "target_depth": [145, 417], "target_dtyp": [277, 406], "target_height": [225, 553], "target_lay": 173, "target_linear": 174, "target_op_lut": 173, "target_spars": [175, 195, 538, 544], "target_sparsity_ratio": 180, "target_width": [225, 553], "targetspac": 266, "task": [128, 209, 216, 234, 380, 472, 475, 477, 484, 488, 489, 525, 528, 533, 537, 538, 544, 547, 549, 552], "taskdataset": 555, "taskset": [484, 489], "tbb": 529, "td": 554, "teacher": [162, 195, 525, 538, 555], "teacher_layer_nam": 195, "teacher_layer_name1": 195, "teacher_layer_name2": 195, "teacher_layer_output_process": 195, "teacher_logit": 538, "teacher_model": [163, 195, 525, 538], "team": [159, 232, 490, 548], "technic": 494, "techniqu": [226, 468, 470, 476, 481, 482, 488, 494, 531, 533, 538, 543, 544, 545, 546, 555], "technologi": [211, 473, 536, 538, 541], "tell": [140, 234, 262, 396], "temp_path": [125, 283], "temperatur": [163, 195, 489, 538], "templat": [188, 538, 544], "template_config": 192, "temporari": [125, 145, 283, 413, 466, 490], "temporarili": 490, "ten": 484, "tencent": [494, 545], "tend": 544, "tensor": [1, 2, 3, 30, 31, 55, 90, 125, 133, 135, 140, 141, 145, 169, 170, 179, 180, 181, 182, 183, 184, 185, 186, 187, 189, 191, 195, 225, 243, 271, 283, 327, 385, 390, 391, 396, 398, 404, 406, 408, 411, 412, 413, 417, 422, 423, 425, 426, 433, 438, 442, 457, 459, 462, 465, 466, 471, 472, 473, 478, 481, 492, 497, 522, 523, 541, 544, 546, 550, 553, 554], "tensor2tensor": 228, "tensor_data": [74, 346, 391, 466, 467], "tensor_dict": 495, "tensor_dtyp": 90, "tensor_nam": [30, 133, 140, 141, 385, 396, 398], "tensor_to_nod": 1, "tensor_v": 133, "tensor_valu": 30, "tensorboard": [161, 195, 455, 466, 495, 538], "tensorcollector": 1, "tensordataset": 209, "tensorflow": [37, 39, 60, 61, 62, 72, 80, 86, 87, 88, 89, 90, 91, 95, 96, 97, 99, 102, 115, 125, 126, 127, 130, 133, 163, 165, 191, 195, 208, 209, 210, 211, 214, 216, 218, 225, 226, 228, 230, 234, 235, 243, 256, 257, 260, 261, 262, 272, 458, 459, 466, 480, 491, 495, 496, 502, 521, 522, 523, 525, 527, 530, 531, 533, 538, 539, 540, 541, 544, 545, 551, 552, 554], "tensorflow1": 208, "tensorflow_addon": 165, "tensorflow_dataload": 260, "tensorflow_itex": [211, 214, 216, 218, 523], "tensorflow_itexadaptor": [32, 289], "tensorflow_model": [237, 260], "tensorflowadam": 165, "tensorflowadamw": 165, "tensorflowadaptor": [32, 289], "tensorflowbasemodel": [243, 260, 390], "tensorflowbertdataload": 208, "tensorflowbertdataset": 209, "tensorflowcheckpointmodel": [243, 390], "tensorflowcifar10": 211, "tensorflowcifar100": 211, "tensorflowcocomap": 234, "tensorflowconfig": 289, "tensorflowconfigconvert": 289, "tensorflowcriterion": 163, "tensorflowcroptoboundingbox": 225, "tensorflowcrossentropyloss": 163, "tensorflowdataload": [208, 260], "tensorflowdataset": 211, "tensorflowfashionmnist": 
211, "tensorflowfilt": 218, "tensorflowglobalconfig": 389, "tensorflowimagenetdataset": 214, "tensorflowimagenetraw": 214, "tensorflowimagerecord": 211, "tensorflowknowledgedistillationloss": 163, "tensorflowknowledgedistillationlossextern": 163, "tensorflowknowledgedistillationlosswrapp": 163, "tensorflowllmmodel": [243, 390], "tensorflowmap": 234, "tensorflowmetr": 234, "tensorflowmnist": 211, "tensorflowmodel": [243, 390], "tensorflowmodelzoobertdataload": 208, "tensorflowmodelzoobertdataset": [209, 210], "tensorflowoptim": 165, "tensorflowparserfactori": 252, "tensorflowprofilingpars": 253, "tensorflowqatmodel": 243, "tensorflowqdqtoonnxqdqconvert": 127, "tensorflowqueri": [32, 289, 495], "tensorflowrandomhorizontalflip": 225, "tensorflowrandomverticalflip": 225, "tensorflowresizecropimagenettransform": 221, "tensorflowresizewithratio": 225, "tensorflowsavedmodelmodel": [243, 390], "tensorflowsgd": 165, "tensorflowshiftrescal": 221, "tensorflowsparsecategoricalcrossentropi": 163, "tensorflowtfrecorddataset": 211, "tensorflowtopk": 234, "tensorflowtransform": 225, "tensorflowtranspos": 225, "tensorflowtransposelastchannel": 221, "tensorflowvocmap": 234, "tensorflowvocrecord": 211, "tensorflowwrapfunct": 225, "tensorproto": [30, 90], "tensorrt": [30, 521, 539, 546], "tensorrtexecutionprovid": [539, 546], "tensors_kl": 1, "tensors_minmax": 1, "teq": [428, 437, 439, 478, 489, 494, 545, 547, 549], "teq_arg": 477, "teq_quantize_entri": 437, "teqconfig": [437, 439, 452, 477, 489], "teqlinearfakequ": [142, 429], "tequant": 432, "term": [162, 181, 182, 187, 189, 478, 481, 491, 521, 535, 537, 542, 543, 554], "test": [151, 195, 225, 448, 491, 494, 555], "test_func": 526, "text": [90, 188, 224, 225, 227, 232, 473, 475, 477, 488, 528, 535, 541, 544, 545, 549, 552, 553, 555], "tf": [90, 101, 126, 133, 173, 192, 208, 209, 210, 211, 225, 241, 243, 286, 303, 305, 361, 385, 387, 389, 390, 391, 479, 481, 494, 522, 526, 534, 540, 545, 548, 553], "tf1": [208, 540], "tf2": 540, "tf2onnx": [195, 456], "tf2onnx_convert": 96, "tf2onnx_util": 86, "tf2onnxconfig": [195, 528], "tf_criteria": 170, "tf_modul": 261, "tf_to_fp32_onnx": 458, "tf_to_int8_onnx": 458, "tfdatadataload": 208, "tfmodelzoocollecttransform": 225, "tfrecord": [209, 210, 211], "tfslimnetsfactori": [241, 391], "tfsquadv1modelzooposttransform": 225, "tfsquadv1posttransform": 225, "th": [74, 75, 346], "thalaiyasingam": 544, "than": [40, 133, 209, 225, 312, 391, 418, 443, 466, 473, 477, 488, 494, 520, 532, 541, 542, 543, 544, 546, 549, 551, 553, 554], "theblok": 494, "thei": [140, 281, 396, 474, 490, 522, 530, 538, 542], "them": [140, 173, 209, 396, 477, 478, 488, 492, 497, 523, 528, 530, 532, 534, 543, 544, 549, 554], "themselv": 546, "theoret": [477, 488, 546, 549], "therefor": [483, 488, 492, 538, 544, 546, 547, 550, 552], "thi": [29, 71, 95, 117, 124, 128, 132, 135, 138, 140, 145, 150, 152, 153, 154, 162, 166, 171, 173, 175, 177, 178, 179, 180, 184, 185, 186, 187, 188, 189, 190, 195, 198, 199, 208, 209, 210, 211, 212, 213, 214, 216, 225, 227, 230, 231, 232, 234, 235, 245, 262, 266, 271, 280, 281, 343, 360, 372, 379, 380, 384, 387, 396, 397, 413, 417, 442, 448, 449, 465, 466, 470, 472, 474, 475, 476, 477, 479, 480, 481, 482, 488, 489, 490, 491, 492, 494, 495, 496, 497, 521, 522, 523, 525, 526, 528, 529, 530, 533, 534, 535, 536, 537, 538, 539, 540, 544, 546, 548, 549, 550, 551, 552, 553, 554], "think": [477, 549], "third": [230, 491, 535], "those": [59, 140, 271, 331, 396, 466, 476, 481, 488, 525, 532, 538, 539, 543, 544, 546, 554], "though": 
[488, 552], "thread": [151, 195, 257, 261, 489, 523, 529], "threaten": 490, "three": [59, 192, 209, 263, 265, 331, 474, 478, 482, 488, 492, 495, 496, 521, 534, 539, 543, 546, 548, 551, 552], "threshold": [1, 147, 453, 461, 537], "through": [135, 198, 199, 234, 235, 262, 476, 477, 478, 488, 494, 520, 530, 532, 533, 537, 542, 543, 544, 546, 549], "throughput_pattern": 483, "throw": 523, "thu": [230, 488, 538, 544, 546], "thudm": 536, "tian": 535, "tiiuae": [475, 536, 552], "tile": 522, "till": [161, 466, 554], "tim": [477, 488, 549], "time": [133, 140, 161, 195, 225, 234, 245, 266, 385, 391, 396, 466, 476, 477, 478, 480, 482, 484, 488, 489, 494, 496, 523, 526, 529, 538, 542, 544, 545, 546, 549, 551, 552, 553, 554], "time_limit": 466, "timeout": [195, 538, 554], "tinybert": 555, "tip": 484, "titl": [466, 535, 554], "tloss": 526, "tmp_file_path": [391, 466], "to_devic": 448, "to_dtyp": 448, "to_numpi": 30, "toarrai": [225, 553], "todo": [156, 209, 212, 230, 278, 387, 420, 443], "togeth": [152, 195, 225, 534, 545, 553], "togethercomput": [475, 552], "token": [209, 222, 225, 227, 228, 418, 431, 452, 462, 475, 477, 488, 489, 537, 538, 549, 551, 552, 553, 554], "token_is_max_context": 225, "token_to_orig_map": 225, "token_type_id": [209, 538], "tokenzi": 224, "toler": [153, 481], "tolerable_loss": [153, 195, 482, 538, 554], "tolist": 552, "tondarrai": 553, "tondarraytransform": 225, "tool": [3, 133, 385, 458, 466, 488, 494, 495, 538, 544, 545, 546], "toolchain": [529, 545], "toolkit": [470, 492, 534, 545], "top": [225, 234, 491, 537, 553], "top1": [449, 537, 544, 546, 555], "topilimag": 553, "topk": [234, 262, 526, 537, 538, 546], "torch": [134, 135, 136, 137, 140, 141, 142, 143, 144, 145, 149, 153, 160, 170, 173, 174, 176, 184, 195, 226, 235, 262, 459, 462, 465, 471, 472, 473, 474, 475, 476, 477, 482, 484, 488, 489, 492, 497, 498, 526, 528, 529, 531, 538, 539, 540, 541, 546, 549, 552], "torch2onnx": [195, 456], "torch2onnxconfig": [195, 528], "torch_dtyp": [494, 497], "torch_load": 137, "torch_model": 237, "torch_to_fp32_onnx": 459, "torch_to_int8_onnx": 459, "torch_util": [439, 547, 552], "torchbaseconfig": 439, "torchdynamo": 478, "torchfunctyp": 406, "torchimport": 478, "torchscript": [441, 528, 547, 552], "torchsmoothqu": [412, 413, 552], "torchvis": [225, 472, 494, 527], "toronto": 211, "torr": 544, "total": [30, 180, 225, 234, 418, 477, 544, 549, 553, 555], "total_block_arg": 145, "total_block_kwarg": 145, "total_execution_tim": 251, "total_valu": [145, 433], "totensor": [538, 553], "tow": 526, "toward": 490, "tpe": 197, "tpetunestrategi": 199, "tqdm": [30, 538], "trace": [135, 144, 145, 173, 195, 269, 408, 412, 413, 417, 438, 442, 459, 476, 478, 492, 554], "trace_and_fuse_sub_graph": 144, "trace_gptq_target_block": 420, "traceabl": 492, "traced_model": [144, 413], "tracer": 492, "track": [161, 551], "trackabl": [133, 385], "trade": [477, 488, 549], "tradit": [481, 544], "train": [29, 42, 65, 149, 162, 181, 182, 185, 187, 195, 198, 199, 209, 211, 225, 226, 262, 314, 337, 413, 420, 439, 455, 473, 474, 475, 476, 477, 478, 480, 482, 494, 495, 496, 501, 525, 528, 531, 533, 539, 541, 543, 545, 547, 549, 551, 552, 554], "train_cfg": 455, "train_dataload": [538, 544, 546], "train_dataset": [526, 546], "train_fn": 477, "train_fun": 538, "train_func": [449, 526, 538, 546], "train_kwarg": 526, "train_load": 526, "train_loop": [449, 538, 543], "train_sampl": 526, "trainabl": [432, 477, 494, 545, 549], "trainableequivalenttransform": 432, "trainer": 538, "training_arg": 538, "training_func": 538, 
"training_func_for_nc": 525, "trane": 478, "transact": 552, "transfer": [162, 216, 488, 525, 538, 552], "transform": [30, 35, 128, 130, 131, 132, 143, 187, 209, 210, 211, 212, 213, 214, 216, 218, 220, 307, 380, 382, 383, 384, 387, 406, 412, 413, 417, 420, 432, 439, 446, 448, 475, 476, 477, 480, 488, 492, 494, 521, 525, 526, 527, 531, 536, 538, 544, 545, 548, 549, 552, 554, 555], "transform_func": 225, "transform_graph": [96, 361], "transform_list": [225, 553], "transform_registri": 225, "transform_typ": 225, "transformer_block": 420, "transformerbasedmodelblockpatterndetector": [143, 417], "transformers_nam": 420, "transformers_pr": 420, "translat": [227, 228, 473, 541, 544, 554], "translation_corpu": 228, "transpos": [30, 58, 179, 221, 225, 330, 458, 549, 553], "travers": [482, 496, 497, 549, 554], "treat": [140, 396, 448, 523], "tree": [188, 278], "tri": [152, 195, 522, 554], "trial": [153, 482, 554], "trigger": [151, 154, 180, 483], "troll": 490, "trt_env_setup": 30, "true": [1, 30, 31, 70, 98, 131, 133, 140, 151, 153, 195, 207, 209, 211, 212, 224, 225, 228, 234, 245, 262, 277, 281, 292, 293, 294, 298, 299, 303, 342, 383, 385, 387, 396, 398, 406, 412, 413, 418, 420, 423, 427, 429, 431, 432, 433, 439, 442, 448, 452, 455, 459, 462, 465, 466, 471, 475, 476, 477, 478, 479, 481, 482, 483, 488, 489, 496, 523, 526, 530, 537, 538, 546, 547, 549, 552, 553, 554], "true_sequenti": [439, 452, 477, 549], "truncat": [209, 225, 418, 553], "trust": [140, 396], "trust_remote_cod": [431, 441, 489], "truth": [231, 232], "try": [207, 243, 390, 481, 488, 492, 494, 529, 544, 546, 552, 554], "try_cnt": [133, 385], "try_loading_kera": [243, 390], "tunabl": [152, 156], "tunable_typ": 156, "tune": [1, 151, 152, 153, 156, 159, 195, 198, 199, 209, 211, 235, 245, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 302, 413, 438, 439, 449, 466, 474, 475, 476, 477, 478, 482, 485, 489, 494, 495, 496, 521, 522, 523, 526, 531, 533, 535, 538, 539, 542, 544, 545, 548, 550, 551], "tune_cfg": [1, 134, 145, 412, 413, 417, 448, 495, 497, 554], "tune_config": [153, 302, 438, 474, 478, 479, 480, 481, 482, 496], "tuner": [198, 199, 235, 262, 449, 546], "tunestrategi": [274, 554], "tunestrategymeta": 274, "tuning_cfg_to_fw": 496, "tuning_config": 153, "tuning_criterion": [195, 538, 542, 551, 554], "tuning_history_path": 466, "tuning_items_prior": 277, "tuning_order_lst": 277, "tuning_param": 155, "tuning_sampl": 276, "tuning_spac": [276, 277, 279], "tuning_strategi": 195, "tuning_struct": [276, 277, 278], "tuningconfig": [153, 302, 438, 474, 478, 479, 480, 481, 482], "tuningcriterion": [195, 538, 542, 551, 554], "tuningitem": 278, "tuninglogg": [153, 159], "tuningmonitor": 153, "tuningord": 277, "tuningparam": 156, "tuningsampl": 277, "tuningspac": [277, 278], "tupl": [1, 90, 133, 145, 153, 195, 198, 199, 221, 225, 235, 262, 277, 302, 305, 399, 406, 408, 411, 412, 413, 417, 418, 425, 426, 435, 437, 438, 439, 442, 448, 449, 459, 465, 478, 481, 496, 522, 537, 538, 546, 553], "turbo": 555, "turn": [537, 547], "tutori": [470, 492, 545], "twitter": 545, "two": [133, 135, 140, 151, 174, 179, 186, 190, 211, 216, 230, 272, 396, 448, 466, 472, 473, 474, 476, 477, 480, 482, 485, 488, 491, 492, 522, 523, 526, 528, 530, 538, 539, 541, 544, 546, 548, 549, 551, 552, 554], "txt": [214, 529, 534], "type": [1, 3, 29, 30, 31, 59, 81, 90, 101, 107, 117, 125, 133, 134, 135, 140, 145, 146, 152, 153, 154, 156, 160, 161, 163, 165, 173, 174, 175, 180, 188, 189, 190, 192, 194, 195, 207, 209, 211, 218, 225, 227, 228, 230, 234, 
243, 245, 266, 267, 274, 278, 280, 281, 283, 301, 305, 331, 352, 372, 385, 390, 391, 396, 398, 403, 404, 406, 408, 409, 411, 413, 417, 418, 420, 425, 426, 427, 431, 433, 435, 437, 438, 439, 442, 448, 459, 465, 466, 470, 472, 473, 474, 475, 476, 477, 478, 479, 481, 484, 495, 496, 526, 528, 530, 533, 538, 539, 541, 546, 547, 549, 551, 553, 554], "typealia": 522, "types_to_splic": [65, 337], "typic": [494, 525, 527, 544, 555], "u": [128, 150, 380], "ubuntu": [534, 555], "ubuntu22": 494, "ubyt": 211, "uint4": 497, "uint8": [30, 221, 230, 280, 409, 433, 439, 481, 488, 495, 530, 546, 553], "ultim": [488, 546], "ultra": [494, 534, 555], "unaccept": 490, "unari": 27, "unary_op": 16, "unarydirect8bitoper": 27, "unaryoper": 27, "unbalanc": 555, "uncas": [209, 225, 553, 555], "uncertain": 544, "undefin": [140, 192, 396], "under": [95, 195, 211, 360, 391, 448, 462, 465, 482, 491, 494, 535, 537, 538, 543, 544, 545, 547, 549, 554], "underli": [52, 53, 55, 140, 324, 325, 327, 396], "understand": [470, 488, 546], "understudi": 227, "unicod": 224, "unicodedecodeerror": [140, 396], "unicoderegex": 227, "unifi": [159, 392, 435, 495, 496, 497, 523, 532, 533, 537, 538, 540], "uniform": [477, 521, 549], "uniformli": [477, 549], "union": [152, 234, 435, 438, 439, 448, 449, 478, 481, 537], "uniqu": [230, 488, 546], "unique_id": 225, "unit": [175, 293, 491, 544], "unit_scal": 472, "unk": 224, "unk_token": 224, "unless": [140, 396, 530], "unlik": 481, "unnecessari": 538, "unpack": [421, 429], "unpack_weight": 429, "unpack_zp": 429, "unpackedweightonlylinearparam": 429, "unpickl": [138, 140, 396, 397], "unpicklingerror": [138, 397], "unpreced": 544, "unquant": 406, "unquantized_node_set": 406, "unsaf": [140, 396], "unseen": [481, 488, 546], "unset": 31, "unsign": [145, 280, 497, 549], "unstructur": [195, 533, 544, 555], "unsupport": [528, 538, 544], "until": [413, 472, 496, 554], "untrac": 492, "untrust": [140, 396], "unus": [69, 133, 341, 385, 448], "unwelcom": 490, "up": [1, 85, 184, 225, 355, 417, 472, 488, 494, 526, 527, 529, 530, 533, 544, 545, 546, 551, 553, 554], "up1": 530, "up2": 530, "updat": [59, 141, 145, 169, 175, 192, 234, 262, 331, 398, 413, 417, 470, 477, 529, 536, 537, 538, 544, 549, 552, 554], "update_config": 195, "update_modul": [141, 398], "update_param": 192, "update_sq_scal": [145, 413], "upgrad": 538, "upload": 533, "upon": 489, "upstream": [431, 441], "url": [211, 420, 491, 494, 534, 535, 544], "us": [1, 3, 21, 29, 30, 31, 33, 34, 55, 88, 90, 101, 126, 135, 140, 145, 151, 152, 154, 161, 163, 165, 169, 173, 175, 180, 184, 185, 188, 190, 191, 195, 198, 199, 207, 208, 209, 210, 211, 212, 213, 216, 224, 225, 227, 228, 230, 234, 245, 257, 261, 262, 266, 269, 271, 274, 278, 280, 281, 286, 288, 289, 301, 305, 306, 327, 387, 391, 396, 404, 405, 406, 408, 409, 412, 413, 416, 417, 418, 425, 426, 427, 431, 433, 438, 441, 442, 443, 448, 449, 452, 453, 454, 455, 458, 459, 460, 462, 466, 472, 473, 474, 475, 476, 477, 478, 479, 481, 482, 484, 488, 489, 490, 491, 492, 495, 496, 520, 521, 522, 525, 526, 528, 530, 531, 532, 535, 538, 539, 540, 541, 542, 544, 545, 546, 547, 548, 549, 550, 551, 553, 554, 555], "usabl": 494, "usag": [145, 152, 153, 156, 208, 230, 281, 391, 433, 443, 445, 448, 478, 479, 484, 488, 496, 526, 527, 542, 549, 553], "use_auto_clip": [439, 477], "use_auto_scal": [439, 477], "use_bf16": [33, 34, 306], "use_bia": [292, 293, 294, 298], "use_bp": 228, "use_double_qu": [439, 477], "use_full_length": 549, "use_full_rang": [439, 477], "use_layer_wis": [420, 439, 452, 477], "use_max_length": 
[420, 549], "use_mse_search": [439, 452, 477], "use_optimum_format": [429, 462, 549], "use_sym": [439, 477, 482], "user": [140, 151, 165, 173, 185, 192, 195, 198, 199, 211, 214, 218, 225, 234, 235, 245, 262, 267, 280, 396, 406, 413, 417, 439, 448, 449, 466, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 484, 488, 489, 492, 495, 496, 497, 501, 520, 522, 523, 525, 528, 530, 533, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 550, 552, 553, 554], "user_cfg": [413, 417], "user_config": 192, "user_eval_fns1": 153, "user_eval_fns2": 153, "user_eval_fns3": 153, "user_eval_fns4": 153, "user_metr": [234, 262], "user_model": [473, 531, 541], "user_obj_cfg": 245, "user_object": 245, "user_postprocess": 223, "user_processor_typ": 448, "userfloatmodel": [471, 476], "usr": 529, "usr_cfg": 245, "usual": [209, 481, 488, 496, 543, 544, 546, 547, 552], "utf": [140, 224, 396], "util": [0, 4, 90, 95, 96, 124, 127, 134, 135, 136, 137, 144, 152, 153, 155, 164, 170, 193, 195, 226, 228, 270, 290, 301, 302, 304, 305, 395, 402, 407, 410, 414, 418, 428, 436, 437, 439, 470, 472, 481, 484, 496, 497, 498, 521, 522, 526, 538, 544, 546, 554], "v": [544, 554], "v0": [475, 536, 552], "v1": [133, 221, 231, 232, 234, 243, 385, 390, 475, 486, 526, 528, 529, 537, 540, 544, 550, 551, 552, 555], "v14": 227, "v2": [135, 269, 475, 523, 526, 528, 529, 536, 550, 552, 554, 555], "v3": [486, 555], "v4": 555, "v5": 491, "v5s6": 555, "val": [133, 192, 211, 214, 426, 538], "val2017": 210, "val_dataload": [479, 538, 546], "val_dataset": [479, 538, 546], "val_load": 546, "val_map": 214, "valid": [40, 52, 53, 90, 159, 180, 183, 192, 195, 211, 230, 243, 292, 294, 297, 298, 312, 324, 325, 390, 409, 448, 489, 494, 495, 527, 530, 533, 535, 538, 544, 546, 549, 550], "valid_keras_format": 391, "valid_mixed_precis": 530, "valid_reshape_input": [52, 53, 324, 325], "validate_and_inference_input_output": [243, 390], "validate_graph_nod": [243, 390], "validate_modul": 448, "valu": [3, 30, 31, 41, 52, 53, 55, 74, 75, 90, 128, 133, 140, 145, 150, 151, 169, 173, 174, 184, 191, 192, 195, 198, 199, 209, 212, 213, 221, 225, 230, 234, 235, 262, 266, 271, 278, 280, 281, 313, 324, 325, 327, 346, 380, 387, 396, 398, 413, 417, 425, 429, 449, 453, 455, 466, 471, 472, 473, 474, 475, 477, 480, 481, 488, 492, 496, 497, 521, 530, 537, 538, 539, 541, 542, 544, 546, 549, 552, 553, 554], "valuabl": 544, "value_layer_nam": 184, "valueerror": [52, 53, 55, 145, 230, 281, 324, 325, 327, 433, 529], "valueinfo": 30, "values_from_const": [52, 53, 55, 324, 325, 327], "vanhouck": 521, "vanilla": [495, 496, 533], "vari": [544, 552, 555], "variabl": [30, 138, 151, 189, 195, 211, 266, 280, 397, 443, 474, 478, 484, 489, 544, 554], "varianc": [128, 150, 380], "variant": 205, "varieti": [478, 482, 527, 554], "variou": [235, 392, 473, 476, 478, 481, 497, 533, 541, 544, 547], "vault": 494, "vcvtne2ps2bf16": [474, 539], "vcvtneps2bf16": [474, 539], "vdpbf16p": [474, 539], "vecchio": 534, "vector": [448, 474], "ventura": 534, "verbos": [30, 266, 425, 459], "veri": [470, 477, 488, 538, 546, 549, 554], "verifi": [528, 536], "version": [1, 39, 89, 142, 145, 173, 195, 226, 311, 429, 446, 458, 459, 470, 475, 490, 492, 494, 495, 522, 530, 534, 535, 538, 545, 552, 554], "version1": [133, 391, 466], "version1_eq_version2": [133, 391, 466], "version1_gt_version2": [133, 391, 466], "version1_gte_version2": [133, 391, 466], "version1_lt_version2": [133, 391, 466], "version1_lte_version2": [133, 391, 466], "version2": [133, 391, 466], "vertic": [179, 225, 553], "vgg": 555, 
"vgg16": [528, 555], "vgg19": 555, "via": [439, 477, 488, 490, 494, 526, 533, 538, 544, 545], "view": [491, 494, 527, 550], "viewpoint": 490, "vincent": 521, "violat": [138, 397], "virtual": [280, 545], "visibl": 483, "vision": [472, 488, 494, 528, 552], "visit": 555, "visual": [195, 551, 554], "vit": 555, "vmware": 545, "vnni": [488, 527, 533, 546], "voc": [211, 234], "vocab": 224, "vocab_fil": [224, 225, 553], "vocabulari": [209, 224, 225, 553], "vocmap": 537, "vscode": 522, "vtune": 535, "w": [31, 225, 420, 475, 488, 544, 552, 553], "w8a8": [405, 471, 476, 477, 488, 547, 549], "w8a8pt2equant": 405, "w_algo": 439, "w_dq": [488, 552], "w_dtype": [439, 473, 475, 476, 541], "w_fp32": [128, 150, 380], "w_granular": 439, "w_int8": [128, 150, 380], "w_q": [488, 552], "w_scale": [488, 552], "w_sym": 439, "wa": [140, 195, 396, 473, 475, 495, 538, 541, 545, 554], "wai": [173, 210, 211, 214, 243, 390, 477, 480, 488, 497, 523, 537, 538, 543, 544, 546, 549, 552, 554], "wanda": 170, "want": [52, 53, 55, 173, 195, 207, 209, 267, 324, 325, 327, 449, 492, 495, 523, 526, 538, 542, 546, 549, 551, 554], "waq": 552, "warm": 417, "warmup": [195, 520, 538], "warn": [413, 463, 522], "wasn": [140, 396], "wasserblat": 544, "we": [29, 30, 52, 53, 55, 59, 71, 128, 140, 149, 150, 151, 153, 173, 177, 184, 195, 208, 227, 230, 269, 324, 325, 327, 331, 343, 380, 396, 413, 470, 471, 475, 476, 477, 478, 480, 481, 482, 483, 484, 488, 489, 490, 492, 494, 495, 496, 497, 522, 523, 526, 528, 530, 531, 536, 537, 538, 542, 543, 544, 546, 547, 549, 550, 551, 552, 554], "web": 494, "websit": 534, "wechat": [494, 545], "wei": [477, 488, 549, 552], "weight": [29, 30, 31, 45, 46, 107, 125, 126, 128, 133, 135, 145, 150, 153, 169, 170, 174, 177, 178, 179, 180, 181, 182, 183, 185, 186, 187, 189, 191, 195, 234, 262, 269, 278, 280, 283, 286, 317, 318, 380, 398, 413, 422, 428, 429, 431, 432, 433, 439, 441, 459, 466, 467, 470, 471, 472, 475, 476, 478, 479, 480, 481, 482, 489, 495, 496, 497, 521, 525, 530, 531, 532, 536, 538, 542, 544, 545, 546, 547, 552, 554], "weight_algorithm": 303, "weight_bit": 496, "weight_clip": [303, 413], "weight_config": [31, 420, 432], "weight_correct": [148, 195, 546], "weight_decai": 538, "weight_dict": 135, "weight_dtyp": [299, 303, 452, 479], "weight_empir": [128, 380], "weight_granular": [299, 303, 479], "weight_max_lb": 413, "weight_max_valu": [292, 293, 294, 297, 298], "weight_min_valu": [292, 293, 294, 297, 298], "weight_name_map": [125, 283], "weight_onli": [4, 195, 394, 439, 465, 473, 522, 541, 547, 549], "weight_shap": 31, "weight_slim": [172, 184], "weight_sym": [299, 303, 479, 481, 482], "weight_tensor": 466, "weight_typ": 459, "weightcorrect": 150, "weightdetail": 466, "weightonli": 31, "weightonlylinear": [429, 477, 549], "weightonlyqu": 477, "weightonlyquantsampl": 277, "weightpruningconfig": [195, 538, 543, 544], "weights_detail": 460, "weights_onli": [140, 396], "weightsdetail": 467, "weightsstatist": 467, "welcom": [469, 490, 491, 494, 556], "well": [198, 199, 235, 262, 449, 473, 488, 494, 522, 541, 544, 546, 550, 552], "wenhua": [477, 488], "were": [140, 234, 396, 544], "wget": 529, "what": [145, 180, 207, 234, 262, 417, 449, 481, 490, 545], "when": [30, 39, 49, 55, 133, 138, 140, 153, 174, 180, 185, 192, 195, 209, 210, 218, 225, 228, 262, 301, 305, 311, 321, 327, 385, 396, 397, 406, 431, 444, 449, 472, 476, 477, 481, 482, 483, 484, 488, 490, 496, 522, 523, 538, 544, 546, 549, 552, 553, 554], "where": [30, 140, 195, 230, 234, 280, 396, 408, 448, 466, 488, 497, 521, 552], "whether": [1, 30, 31, 
90, 101, 133, 140, 145, 175, 195, 207, 209, 221, 225, 228, 230, 234, 305, 385, 391, 396, 409, 412, 413, 420, 425, 426, 431, 433, 442, 446, 455, 462, 465, 466, 477, 483, 495, 537, 546, 549, 552, 553], "which": [1, 29, 41, 68, 95, 135, 140, 152, 169, 173, 174, 176, 180, 181, 182, 184, 187, 188, 190, 192, 195, 209, 210, 211, 221, 227, 232, 234, 239, 245, 262, 266, 313, 340, 360, 391, 396, 413, 420, 425, 426, 431, 448, 466, 470, 472, 474, 476, 477, 478, 479, 481, 482, 488, 490, 492, 495, 496, 497, 521, 523, 526, 529, 530, 533, 537, 538, 539, 540, 543, 544, 546, 547, 548, 549, 550, 552, 553, 554], "while": [174, 185, 192, 266, 474, 476, 477, 481, 488, 494, 495, 496, 527, 533, 539, 544, 546, 549, 552], "white_list": [152, 299, 303, 439], "white_module_list": 448, "white_nod": 2, "whitespac": [224, 232], "whitespace_token": 224, "whl": [494, 534], "who": [267, 490], "whole": [471, 488, 497, 546, 555], "whose": [149, 175, 243, 390, 413, 448, 532, 552, 554], "why": [488, 552], "wide": [474, 477, 488, 494, 527, 539, 544, 546], "wideresnet40": 555, "width": [179, 221, 225, 488, 521, 526, 544, 546, 553], "wiki": 490, "wikitext": 555, "window": [151, 154, 483, 484, 520, 534], "winter": [488, 552], "wip": 536, "wise": [128, 137, 139, 141, 150, 195, 266, 267, 271, 380, 395, 398, 404, 470, 489, 496, 533, 544, 549, 552, 554], "wish": 535, "with_arg": 497, "within": [89, 152, 153, 181, 182, 187, 194, 230, 243, 257, 261, 390, 448, 474, 477, 489, 490, 492, 497, 521, 539, 544, 545, 549, 552, 554], "without": [34, 75, 133, 185, 225, 280, 385, 446, 473, 488, 490, 526, 538, 541, 544, 545, 546, 554], "wnli": [209, 537], "won": [195, 546, 549], "woq": [431, 439, 441, 473, 489, 494, 536, 541], "woq_config": 489, "woq_model": 489, "woqmodelload": 431, "word": [227, 477, 488, 522, 537, 544, 549, 555], "wordpiec": [224, 225, 553], "wordpiecetoken": 224, "work": [195, 301, 305, 478, 481, 483, 484, 488, 493, 496, 522, 523, 548, 549, 550, 552], "worker": [538, 546], "workflow": [470, 473, 494, 496, 497, 528, 531, 534, 539, 541], "workload": [466, 474, 545], "workload_loc": 466, "workshop": 545, "workspac": [161, 195, 465, 466, 540], "workspace_path": 195, "worth": [522, 544], "would": [476, 488, 491, 538, 544, 546, 552], "wrap": [1, 90, 101, 163, 230, 305, 389, 492, 526], "wrapmxnetmetr": 234, "wraponnxrtmetr": 234, "wrapped_lay": 101, "wrapper": [60, 87, 88, 89, 90, 107, 125, 134, 142, 153, 163, 170, 208, 225, 230, 234, 239, 243, 283, 332, 389, 390, 401, 429, 433, 481, 548], "wrapperlay": 413, "wrappytorchmetr": 234, "write": [133, 218, 385, 466, 526, 532, 538], "write_graph": [133, 385], "written": 538, "wt_compare_dict": 135, "www": [211, 555], "x": [1, 40, 59, 174, 195, 208, 225, 266, 281, 312, 331, 413, 448, 482, 488, 494, 521, 522, 526, 531, 534, 539, 540, 546, 547, 552, 553], "x1": [488, 552, 554], "x2": [488, 552, 554], "x86": [474, 539], "x86_64": 520, "x86_inductor_quant": 409, "x86inductorquant": [409, 471], "x_max": 266, "x_q": [488, 552], "x_scale": [488, 552], "x_tmp": [488, 552], "xdoctest": [140, 396], "xeon": [474, 483, 488, 494, 534, 535, 536, 539, 545, 546, 548, 555], "xgb": 195, "xgboost": 544, "xiao": [477, 488, 549, 552], "xiui": [477, 488, 549, 552], "xlm": [209, 555], "xlnet": [209, 555], "xpu": [195, 417, 443, 478, 481, 489, 546], "xpu_acceler": 443, "xx": [145, 433], "xx_func": 522, "xxx": [154, 211, 538], "xxy": 211, "xxz": 211, "y": [94, 195, 225, 266, 359, 488, 522, 529, 552, 553], "y_dq": [488, 552], "y_max": 266, "y_q": [488, 552], "yaml": [192, 195, 198, 199, 209, 211, 288, 465, 466, 495, 
497, 532, 537, 538, 542, 553], "yaml_fil": [466, 526], "yaml_file_path": 526, "yao": 135, "year": 535, "yet": 546, "yield": [153, 198, 199, 207, 235, 262, 266, 281, 387, 449, 488, 523, 546, 554], "yolo": 555, "yolov3": 555, "yolov5": 544, "you": [29, 140, 195, 207, 209, 234, 262, 396, 413, 470, 472, 489, 491, 492, 496, 522, 523, 526, 529, 534, 535, 537, 539, 544, 546, 549, 550, 551, 552], "your": [207, 209, 470, 476, 489, 491, 494, 522, 523, 526, 535, 538, 544, 545, 551, 554], "your_node1_nam": 526, "your_node2_nam": 526, "your_script": 550, "yourmodel": 477, "yourself": 544, "youtub": 545, "yum": [529, 534], "yvinec": [488, 552], "z": [30, 483, 488, 521, 522, 552], "za": 483, "zafrir": 544, "zero": [3, 30, 31, 180, 192, 225, 292, 293, 294, 298, 413, 422, 425, 426, 433, 466, 473, 476, 477, 488, 541, 544, 546, 549, 552, 553, 554], "zero_grad": [526, 538, 544], "zero_point": [30, 31, 452], "zero_point_nam": 30, "zeropoint": [481, 488, 546], "zfnet": 555, "zhen": 135, "zhewei": 135, "zip": [413, 492], "zo_valu": 30, "zone": 545, "zoo": [208, 225, 494, 527, 555], "zp": [31, 429, 433, 488, 552], "\u03b1": 521, "\u03b2": 521, "\u2776": 496, "\u2777": 496, "\u2778": 496, "\u2779": 496, "\u277a": 496, "\u277b": 496, "\u277c": 496, "\u817e\u8baf\u4e91taco": 545, "\u96c6\u6210\u82f1\u7279\u5c14": 545}, "titles": ["neural_compressor.adaptor.mxnet_utils", "neural_compressor.adaptor.mxnet_utils.util", "neural_compressor.adaptor.ox_utils.calibration", "neural_compressor.adaptor.ox_utils.calibrator", "neural_compressor.adaptor.ox_utils", "neural_compressor.adaptor.ox_utils.operators.activation", "neural_compressor.adaptor.ox_utils.operators.argmax", "neural_compressor.adaptor.ox_utils.operators.attention", "neural_compressor.adaptor.ox_utils.operators.binary_op", "neural_compressor.adaptor.ox_utils.operators.concat", "neural_compressor.adaptor.ox_utils.operators.conv", "neural_compressor.adaptor.ox_utils.operators.direct_q8", "neural_compressor.adaptor.ox_utils.operators.embed_layernorm", "neural_compressor.adaptor.ox_utils.operators.gather", "neural_compressor.adaptor.ox_utils.operators.gavgpool", "neural_compressor.adaptor.ox_utils.operators.gemm", "neural_compressor.adaptor.ox_utils.operators", "neural_compressor.adaptor.ox_utils.operators.lstm", "neural_compressor.adaptor.ox_utils.operators.matmul", "neural_compressor.adaptor.ox_utils.operators.maxpool", "neural_compressor.adaptor.ox_utils.operators.norm", "neural_compressor.adaptor.ox_utils.operators.ops", "neural_compressor.adaptor.ox_utils.operators.pad", "neural_compressor.adaptor.ox_utils.operators.pooling", "neural_compressor.adaptor.ox_utils.operators.reduce", "neural_compressor.adaptor.ox_utils.operators.resize", "neural_compressor.adaptor.ox_utils.operators.split", "neural_compressor.adaptor.ox_utils.operators.unary_op", "neural_compressor.adaptor.ox_utils.quantizer", "neural_compressor.adaptor.ox_utils.smooth_quant", "neural_compressor.adaptor.ox_utils.util", "neural_compressor.adaptor.ox_utils.weight_only", "neural_compressor.adaptor.tensorflow", "neural_compressor.adaptor.tf_utils.graph_converter", "neural_compressor.adaptor.tf_utils.graph_converter_without_calib", "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.bf16_convert", "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16.dequantize_cast_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.bf16", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_add_to_biasadd", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_layout", 
"neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_leakyrelu", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_nan_to_random", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.convert_placeholder_to_const", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dilated_contraction", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.dummy_biasadd", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.expanddims_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fetch_weight_from_reshape", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_batch_norm", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fold_constant", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_biasadd_add", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_column_wise_mul", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_conv_with_math", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_bn", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_decomposed_in", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_gelu", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_layer_norm", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_conv", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.fuse_reshape_transpose", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.graph_cse_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.grappler_pass", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.move_squeeze_after_relu", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.pre_optimize", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.remove_training_nodes", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.rename_batch_norm", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.split_shared_input", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_equivalent_nodes", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.strip_unused_nodes", "neural_compressor.adaptor.tf_utils.graph_rewriter.generic.switch_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.graph_base", "neural_compressor.adaptor.tf_utils.graph_rewriter", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_fake_quant", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.freeze_value_without_calib", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_conv_requantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.fuse_matmul_requantize", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.meta_op_optimizer", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_hostconst_converter", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.post_quantized_op_cse", "neural_compressor.adaptor.tf_utils.graph_rewriter.int8.rnn_convert", 
"neural_compressor.adaptor.tf_utils.graph_rewriter.int8.scale_propagation", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_graph", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_node", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.onnx_schema", "neural_compressor.adaptor.tf_utils.graph_rewriter.onnx.tf2onnx_utils", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.insert_qdq_pattern", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.merge_duplicated_qdq", "neural_compressor.adaptor.tf_utils.graph_rewriter.qdq.share_qdq_y_pattern", "neural_compressor.adaptor.tf_utils.graph_util", "neural_compressor.adaptor.tf_utils", "neural_compressor.adaptor.tf_utils.quantize_graph", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.fake_quantize", "neural_compressor.adaptor.tf_utils.quantize_graph.qat", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_config", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_helper", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.optimize_layer", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_add", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_base", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_layers.quantize_layer_bn", "neural_compressor.adaptor.tf_utils.quantize_graph.qat.quantize_wrapper", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_bn", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_concatv2", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_conv", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_deconv", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_in", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_matmul", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.fuse_qdq_pooling", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq", "neural_compressor.adaptor.tf_utils.quantize_graph.qdq.optimize_qdq", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_base", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_bn", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_concatv2", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_conv", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_for_intel_cpu", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_matmul", "neural_compressor.adaptor.tf_utils.quantize_graph.quantize_graph_pooling", "neural_compressor.adaptor.tf_utils.quantize_graph_common", "neural_compressor.adaptor.tf_utils.smooth_quant_calibration", "neural_compressor.adaptor.tf_utils.smooth_quant_scaler", "neural_compressor.adaptor.tf_utils.tf2onnx_converter", "neural_compressor.adaptor.tf_utils.transform_graph.bias_correction", "neural_compressor.adaptor.tf_utils.transform_graph.graph_transform_base", "neural_compressor.adaptor.tf_utils.transform_graph", "neural_compressor.adaptor.tf_utils.transform_graph.insert_logging", "neural_compressor.adaptor.tf_utils.transform_graph.rerange_quantized_concat", "neural_compressor.adaptor.tf_utils.util", "neural_compressor.adaptor.torch_utils.bf16_convert", "neural_compressor.adaptor.torch_utils.hawq_metric", 
"neural_compressor.adaptor.torch_utils", "neural_compressor.adaptor.torch_utils.layer_wise_quant", "neural_compressor.adaptor.torch_utils.layer_wise_quant.modified_pickle", "neural_compressor.adaptor.torch_utils.layer_wise_quant.quantize", "neural_compressor.adaptor.torch_utils.layer_wise_quant.torch_load", "neural_compressor.adaptor.torch_utils.layer_wise_quant.utils", "neural_compressor.adaptor.torch_utils.model_wrapper", "neural_compressor.adaptor.torch_utils.pattern_detector", "neural_compressor.adaptor.torch_utils.symbolic_trace", "neural_compressor.adaptor.torch_utils.util", "neural_compressor.algorithm.algorithm", "neural_compressor.algorithm.fast_bias_correction", "neural_compressor.algorithm", "neural_compressor.algorithm.smooth_quant", "neural_compressor.algorithm.weight_correction", "neural_compressor.benchmark", "neural_compressor.common.base_config", "neural_compressor.common.base_tuning", "neural_compressor.common.benchmark", "neural_compressor.common", "neural_compressor.common.tuning_param", "neural_compressor.common.utils.constants", "neural_compressor.common.utils", "neural_compressor.common.utils.logger", "neural_compressor.common.utils.save_load", "neural_compressor.common.utils.utility", "neural_compressor.compression.callbacks", "neural_compressor.compression.distillation.criterions", "neural_compressor.compression.distillation", "neural_compressor.compression.distillation.optimizers", "neural_compressor.compression.distillation.utility", "neural_compressor.compression.hpo", "neural_compressor.compression.hpo.sa_optimizer", "neural_compressor.compression.pruner.criteria", "neural_compressor.compression.pruner", "neural_compressor.compression.pruner.model_slim.auto_slim", "neural_compressor.compression.pruner.model_slim", "neural_compressor.compression.pruner.model_slim.pattern_analyzer", "neural_compressor.compression.pruner.model_slim.weight_slim", "neural_compressor.compression.pruner.patterns.base", "neural_compressor.compression.pruner.patterns", "neural_compressor.compression.pruner.patterns.mha", "neural_compressor.compression.pruner.patterns.ninm", "neural_compressor.compression.pruner.patterns.nxm", "neural_compressor.compression.pruner.pruners.base", "neural_compressor.compression.pruner.pruners.basic", "neural_compressor.compression.pruner.pruners.block_mask", "neural_compressor.compression.pruner.pruners", "neural_compressor.compression.pruner.pruners.mha", "neural_compressor.compression.pruner.pruners.pattern_lock", "neural_compressor.compression.pruner.pruners.progressive", "neural_compressor.compression.pruner.pruners.retrain_free", "neural_compressor.compression.pruner.pruning", "neural_compressor.compression.pruner.regs", "neural_compressor.compression.pruner.schedulers", "neural_compressor.compression.pruner.tf_criteria", "neural_compressor.compression.pruner.utils", "neural_compressor.compression.pruner.wanda", "neural_compressor.compression.pruner.wanda.utils", "neural_compressor.config", "neural_compressor.contrib", "neural_compressor.contrib.strategy", "neural_compressor.contrib.strategy.sigopt", "neural_compressor.contrib.strategy.tpe", "neural_compressor.data.dataloaders.base_dataloader", "neural_compressor.data.dataloaders.dataloader", "neural_compressor.data.dataloaders.default_dataloader", "neural_compressor.data.dataloaders.fetcher", "neural_compressor.data.dataloaders.mxnet_dataloader", "neural_compressor.data.dataloaders.onnxrt_dataloader", "neural_compressor.data.dataloaders.pytorch_dataloader", 
"neural_compressor.data.dataloaders.sampler", "neural_compressor.data.dataloaders.tensorflow_dataloader", "neural_compressor.data.datasets.bert_dataset", "neural_compressor.data.datasets.coco_dataset", "neural_compressor.data.datasets.dataset", "neural_compressor.data.datasets.dummy_dataset", "neural_compressor.data.datasets.dummy_dataset_v2", "neural_compressor.data.datasets.imagenet_dataset", "neural_compressor.data.datasets", "neural_compressor.data.datasets.style_transfer_dataset", "neural_compressor.data.filters.coco_filter", "neural_compressor.data.filters.filter", "neural_compressor.data.filters", "neural_compressor.data", "neural_compressor.data.transforms.imagenet_transform", "neural_compressor.data.transforms", "neural_compressor.data.transforms.postprocess", "neural_compressor.data.transforms.tokenization", "neural_compressor.data.transforms.transform", "neural_compressor", "neural_compressor.metric.bleu", "neural_compressor.metric.bleu_util", "neural_compressor.metric.coco_label_map", "neural_compressor.metric.coco_tools", "neural_compressor.metric.evaluate_squad", "neural_compressor.metric.f1", "neural_compressor.metric", "neural_compressor.metric.metric", "neural_compressor.mix_precision", "neural_compressor.model.base_model", "neural_compressor.model", "neural_compressor.model.keras_model", "neural_compressor.model.model", "neural_compressor.model.mxnet_model", "neural_compressor.model.nets_factory", "neural_compressor.model.onnx_model", "neural_compressor.model.tensorflow_model", "neural_compressor.model.torch_model", "neural_compressor.objective", "neural_compressor.profiling", "neural_compressor.profiling.parser.factory", "neural_compressor.profiling.parser.onnx_parser.factory", "neural_compressor.profiling.parser.onnx_parser.parser", "neural_compressor.profiling.parser.parser", "neural_compressor.profiling.parser.result", "neural_compressor.profiling.parser.tensorflow_parser.factory", "neural_compressor.profiling.parser.tensorflow_parser.parser", "neural_compressor.profiling.profiler.factory", "neural_compressor.profiling.profiler.onnxrt_profiler.factory", "neural_compressor.profiling.profiler.onnxrt_profiler.profiler", "neural_compressor.profiling.profiler.onnxrt_profiler.utils", "neural_compressor.profiling.profiler.profiler", "neural_compressor.profiling.profiler.tensorflow_profiler.factory", "neural_compressor.profiling.profiler.tensorflow_profiler.profiler", "neural_compressor.profiling.profiler.tensorflow_profiler.utils", "neural_compressor.quantization", "neural_compressor.strategy.auto", "neural_compressor.strategy.auto_mixed_precision", "neural_compressor.strategy.basic", "neural_compressor.strategy.bayesian", "neural_compressor.strategy.conservative", "neural_compressor.strategy.exhaustive", "neural_compressor.strategy.hawq_v2", "neural_compressor.strategy", "neural_compressor.strategy.mse", "neural_compressor.strategy.mse_v2", "neural_compressor.strategy.random", "neural_compressor.strategy.strategy", "neural_compressor.strategy.utils.constant", "neural_compressor.strategy.utils", "neural_compressor.strategy.utils.tuning_sampler", "neural_compressor.strategy.utils.tuning_space", "neural_compressor.strategy.utils.tuning_structs", "neural_compressor.strategy.utils.utility", "neural_compressor.template.api_doc_example", "neural_compressor.tensorflow.algorithms", "neural_compressor.tensorflow.algorithms.smoother.calibration", "neural_compressor.tensorflow.algorithms.smoother.core", "neural_compressor.tensorflow.algorithms.smoother", 
"neural_compressor.tensorflow.algorithms.smoother.scaler", "neural_compressor.tensorflow.algorithms.static_quant", "neural_compressor.tensorflow.algorithms.static_quant.keras", "neural_compressor.tensorflow.algorithms.static_quant.tensorflow", "neural_compressor.tensorflow", "neural_compressor.tensorflow.keras", "neural_compressor.tensorflow.keras.layers.conv2d", "neural_compressor.tensorflow.keras.layers.dense", "neural_compressor.tensorflow.keras.layers.depthwise_conv2d", "neural_compressor.tensorflow.keras.layers", "neural_compressor.tensorflow.keras.layers.layer_initializer", "neural_compressor.tensorflow.keras.layers.pool2d", "neural_compressor.tensorflow.keras.layers.separable_conv2d", "neural_compressor.tensorflow.keras.quantization.config", "neural_compressor.tensorflow.keras.quantization", "neural_compressor.tensorflow.quantization.algorithm_entry", "neural_compressor.tensorflow.quantization.autotune", "neural_compressor.tensorflow.quantization.config", "neural_compressor.tensorflow.quantization", "neural_compressor.tensorflow.quantization.quantize", "neural_compressor.tensorflow.quantization.utils.graph_converter", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.bf16_convert", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16.dequantize_cast_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.bf16", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_add_to_biasadd", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_layout", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_leakyrelu", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_nan_to_random", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.convert_placeholder_to_const", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dilated_contraction", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.dummy_biasadd", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.expanddims_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fetch_weight_from_reshape", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_batch_norm", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fold_constant", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_biasadd_add", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_column_wise_mul", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_conv_with_math", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_bn", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_decomposed_in", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_gelu", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_layer_norm", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_conv", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_pad_with_fp32_conv", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.fuse_reshape_transpose", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.graph_cse_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.grappler_pass", 
"neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.insert_print_node", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.move_squeeze_after_relu", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.pre_optimize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.remove_training_nodes", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.rename_batch_norm", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.split_shared_input", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_equivalent_nodes", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.strip_unused_nodes", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.generic.switch_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.graph_base", "neural_compressor.tensorflow.quantization.utils.graph_rewriter", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_fake_quant", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.freeze_value", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_redundant_dequantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_conv_requantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_redundant_dequantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.fuse_matmul_requantize", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.meta_op_optimizer", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_hostconst_converter", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.post_quantized_op_cse", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.int8.scale_propagation", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.insert_qdq_pattern", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.merge_duplicated_qdq", "neural_compressor.tensorflow.quantization.utils.graph_rewriter.qdq.share_qdq_y_pattern", "neural_compressor.tensorflow.quantization.utils.graph_util", "neural_compressor.tensorflow.quantization.utils", "neural_compressor.tensorflow.quantization.utils.quantize_graph", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_bn", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_concatv2", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_conv", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_deconv", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_in", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_matmul", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.fuse_qdq_pooling", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq", "neural_compressor.tensorflow.quantization.utils.quantize_graph.qdq.optimize_qdq", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_base", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_bn", 
"neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_concatv2", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_conv", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_for_intel_cpu", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_matmul", "neural_compressor.tensorflow.quantization.utils.quantize_graph.quantize_graph_pooling", "neural_compressor.tensorflow.quantization.utils.quantize_graph_common", "neural_compressor.tensorflow.quantization.utils.transform_graph.bias_correction", "neural_compressor.tensorflow.quantization.utils.transform_graph.graph_transform_base", "neural_compressor.tensorflow.quantization.utils.transform_graph", "neural_compressor.tensorflow.quantization.utils.transform_graph.insert_logging", "neural_compressor.tensorflow.quantization.utils.transform_graph.rerange_quantized_concat", "neural_compressor.tensorflow.quantization.utils.utility", "neural_compressor.tensorflow.utils.constants", "neural_compressor.tensorflow.utils.data", "neural_compressor.tensorflow.utils", "neural_compressor.tensorflow.utils.model", "neural_compressor.tensorflow.utils.model_wrappers", "neural_compressor.tensorflow.utils.utility", "neural_compressor.torch.algorithms.base_algorithm", "neural_compressor.torch.algorithms.fp8_quant.utils.logger", "neural_compressor.torch.algorithms", "neural_compressor.torch.algorithms.layer_wise", "neural_compressor.torch.algorithms.layer_wise.load", "neural_compressor.torch.algorithms.layer_wise.modified_pickle", "neural_compressor.torch.algorithms.layer_wise.utils", "neural_compressor.torch.algorithms.mixed_precision.half_precision_convert", "neural_compressor.torch.algorithms.mixed_precision", "neural_compressor.torch.algorithms.mixed_precision.module_wrappers", "neural_compressor.torch.algorithms.mx_quant", "neural_compressor.torch.algorithms.mx_quant.mx", "neural_compressor.torch.algorithms.mx_quant.utils", "neural_compressor.torch.algorithms.pt2e_quant.core", "neural_compressor.torch.algorithms.pt2e_quant.half_precision_rewriter", "neural_compressor.torch.algorithms.pt2e_quant", "neural_compressor.torch.algorithms.pt2e_quant.save_load", "neural_compressor.torch.algorithms.pt2e_quant.utility", "neural_compressor.torch.algorithms.smooth_quant", "neural_compressor.torch.algorithms.smooth_quant.save_load", "neural_compressor.torch.algorithms.smooth_quant.smooth_quant", "neural_compressor.torch.algorithms.smooth_quant.utility", "neural_compressor.torch.algorithms.static_quant", "neural_compressor.torch.algorithms.static_quant.save_load", "neural_compressor.torch.algorithms.static_quant.static_quant", "neural_compressor.torch.algorithms.static_quant.utility", "neural_compressor.torch.algorithms.weight_only.autoround", "neural_compressor.torch.algorithms.weight_only.awq", "neural_compressor.torch.algorithms.weight_only.gptq", "neural_compressor.torch.algorithms.weight_only.hqq.bitpack", "neural_compressor.torch.algorithms.weight_only.hqq.config", "neural_compressor.torch.algorithms.weight_only.hqq.core", "neural_compressor.torch.algorithms.weight_only.hqq", "neural_compressor.torch.algorithms.weight_only.hqq.optimizer", "neural_compressor.torch.algorithms.weight_only.hqq.qtensor", "neural_compressor.torch.algorithms.weight_only.hqq.quantizer", "neural_compressor.torch.algorithms.weight_only", "neural_compressor.torch.algorithms.weight_only.modules", "neural_compressor.torch.algorithms.weight_only.rtn", 
"neural_compressor.torch.algorithms.weight_only.save_load", "neural_compressor.torch.algorithms.weight_only.teq", "neural_compressor.torch.algorithms.weight_only.utility", "neural_compressor.torch.export", "neural_compressor.torch.export.pt2e_export", "neural_compressor.torch", "neural_compressor.torch.quantization.algorithm_entry", "neural_compressor.torch.quantization.autotune", "neural_compressor.torch.quantization.config", "neural_compressor.torch.quantization", "neural_compressor.torch.quantization.load_entry", "neural_compressor.torch.quantization.quantize", "neural_compressor.torch.utils.auto_accelerator", "neural_compressor.torch.utils.bit_packer", "neural_compressor.torch.utils.constants", "neural_compressor.torch.utils.environ", "neural_compressor.torch.utils", "neural_compressor.torch.utils.utility", "neural_compressor.training", "neural_compressor.transformers.quantization.utils", "neural_compressor.transformers.utils", "neural_compressor.transformers.utils.quantization_config", "neural_compressor.utils.collect_layer_histogram", "neural_compressor.utils.constant", "neural_compressor.utils.create_obj_from_config", "neural_compressor.utils.export", "neural_compressor.utils.export.qlinear2qdq", "neural_compressor.utils.export.tf2onnx", "neural_compressor.utils.export.torch2onnx", "neural_compressor.utils", "neural_compressor.utils.kl_divergence", "neural_compressor.utils.load_huggingface", "neural_compressor.utils.logger", "neural_compressor.utils.options", "neural_compressor.utils.pytorch", "neural_compressor.utils.utility", "neural_compressor.utils.weights_details", "neural_compressor.version", "Intel\u00ae Neural Compressor Documentation", "2.X API User Guide", "Dynamic Quantization", "FP8 Quantization", "Microscaling Quantization", "PyTorch Mixed Precision", "PyTorch Smooth Quantization", "PyTorch Static Quantization", "PyTorch Weight Only Quantization", "Torch", "TensorFlow Quantization", "Smooth Quant", "TensorFlow", "AutoTune", "Benchmark", "Quantization on Client", "Design", "Version mapping between Intel Neural Compressor to Gaudi Software Stack", "<no title>", "Quantization", "Transformers-like API", "Contributor Covenant Code of Conduct", "Contribution Guidelines", "FX", "Security Policy", "Intel\u00ae Neural Compressor", "Adaptor", "How to Add An Adaptor", "How to Support New Data Type, Like Int4, with a Few Line Changes", "Adaptor", "ONNX Runtime", "Torch Utils", "2.0 API", "3.0 API", "API Document Example", "APIs", "Benchmark", "Compression", "Config", "Mix Precision", "Model", "Objective", "Quantization", "Strategy", "Tensorflow Quantization AutoTune", "Tensorflow Quantization Base API", "Tensorflow Quantization Config", "Pytorch Quantization AutoTune", "Pytorch Quantization Base API", "Pytorch Quantization Config", "Training", "Benchmarking", "Calibration Algorithms in Quantization", "INC Coding Conventions", "DataLoader", "Design", "Distillation for Quantization", "Distributed Training and Inference (Evaluation)", "Examples", "Export", "Frequently Asked Questions", "Framework YAML Configuration Files", "Getting Started", "Incompatible changes between v1.2 and v1.1", "Infrastructure of Intel\u00ae Neural Compressor", "Installation", "Legal Information", "LLMs Quantization Recipes", "Metrics", "Code Migration from Intel Neural Compressor 1.X to Intel Neural Compressor 2.X", "Mixed Precision", "Model", "Microscaling Quantization", "Objective", "Optimization Orchestration", "Pruning", "Full Publications/Events (85)", "Quantization", "Layer Wise Quantization (LWQ)", 
"Turn OFF Auto Mixed Precision during Quantization", "Weight Only Quantization (WOQ)", "Release", "SigOpt Strategy", "Smooth Quant", "Transform", "Tuning Strategies", "Validated Models", "Intel\u00ae Neural Compressor Documentation"], "titleterms": {"": [494, 534], "0": [501, 502, 555], "1": [526, 529, 532, 538, 555], "15": [545, 555], "16": 555, "18": 555, "2": [470, 501, 526, 529, 532, 538, 555], "2018": 545, "2020": 545, "2021": 545, "2022": 545, "2023": 545, "2024": 545, "25": 545, "3": [502, 529, 555], "35": 545, "4": [529, 545], "5": 529, "6": 545, "64": 534, "85": 545, "For": 489, "One": 543, "With": 479, "abil": 497, "accept": 491, "accord": [496, 497], "accuraci": [474, 479, 488, 536, 539, 546, 554], "activ": 5, "ad": 495, "adaptor": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 495, 496, 498], "add": 496, "addit": 494, "advanc": 470, "ai": 534, "algorithm": [146, 147, 148, 149, 150, 282, 283, 284, 285, 286, 287, 288, 289, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 489, 521, 549, 552, 554], "algorithm_entri": [301, 437], "alpha": [475, 480, 552], "an": 496, "annot": 522, "api": [470, 473, 474, 478, 481, 489, 495, 496, 501, 502, 503, 504, 514, 517, 520, 523, 525, 526, 532, 537, 539, 541, 542, 543, 544], "api_doc_exampl": 281, "appendix": 528, "approach": [481, 546], "architectur": [485, 524, 533, 534], "argmax": 6, "argument": 477, "ask": 529, "asymmetr": 488, "attent": 7, "attribut": [211, 281, 490], "auto": [263, 480, 548, 552, 554], "auto_acceler": 443, "auto_mixed_precis": 264, "auto_slim": 171, "autoround": [418, 477], "autotun": [302, 438, 474, 478, 482, 513, 516], "awar": [479, 488, 492, 538, 544, 546], "awq": [419, 477], "backend": [476, 481, 495, 546], "background": [495, 522], "base": [175, 180, 470, 514, 517, 534], "base_algorithm": 392, "base_config": 152, "base_dataload": 200, "base_model": 236, "base_tun": 153, "basic": [181, 265, 554], "bayesian": [266, 554], "benchmark": [151, 154, 483, 505, 520, 538], "benefit": 551, "bert_dataset": 209, "between": [486, 532], "bf16": [35, 36, 37, 307, 308, 309, 474, 539], "bf16_convert": [35, 134, 307], "bias_correct": [128, 380], "binari": 534, "binary_op": 8, "bit_pack": 444, "bitpack": 421, "bleu": 227, "bleu_util": 228, "block": 552, "block_mask": 182, "build": [523, 529, 537], "built": [532, 534, 537], "calcul": 496, "calibr": [2, 3, 283, 496, 521], "callback": 162, "capabl": [495, 549], "case": 483, "chang": [497, 532, 550], "channel": [488, 552], "check": 491, "checklist": 491, "citat": 535, "class": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 
85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 134, 135, 139, 142, 143, 146, 147, 149, 150, 152, 153, 156, 157, 159, 161, 162, 163, 165, 169, 173, 174, 175, 177, 178, 179, 180, 181, 182, 184, 185, 186, 187, 188, 189, 190, 191, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 227, 230, 234, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 258, 259, 260, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 303, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 387, 389, 390, 391, 392, 398, 399, 401, 403, 404, 405, 406, 412, 413, 416, 417, 418, 419, 420, 421, 422, 423, 426, 427, 429, 430, 431, 432, 433, 439, 443, 445, 449, 452, 453, 461, 462, 463, 464, 466, 467, 495], "client": [477, 484], "coco_dataset": 210, "coco_filt": 217, "coco_label_map": 229, "coco_tool": 230, "code": [490, 491, 522, 538, 549], "collect_layer_histogram": 453, "comment": 522, "common": [152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 477, 478, 492, 529], "commun": 494, "comparison": 551, "compat": 534, "compress": [162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 506, 549], "compressor": [469, 486, 492, 494, 523, 533, 534, 537, 538, 551, 556], "comput": 534, "concat": 9, "conduct": [490, 491], "config": [195, 299, 303, 422, 439, 507, 515, 518, 542], "configur": [497, 526, 530, 551], "conserv": [267, 554], "constant": [157, 275, 386, 445, 454], "content": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 101, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149, 150, 151, 152, 153, 154, 156, 157, 159, 160, 161, 162, 163, 165, 166, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 227, 228, 230, 231, 232, 234, 235, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 
364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 387, 389, 390, 391, 392, 396, 397, 398, 399, 401, 403, 404, 405, 406, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 421, 422, 423, 425, 426, 427, 429, 430, 431, 432, 433, 435, 437, 438, 439, 441, 442, 443, 444, 445, 446, 448, 449, 452, 453, 455, 457, 458, 459, 461, 462, 463, 464, 465, 466, 467, 494], "contrib": [196, 197, 198, 199], "contribut": 491, "contributor": [490, 491], "conv": 10, "conv2d": 292, "convent": 522, "convert_add_to_biasadd": [38, 310], "convert_layout": [39, 311], "convert_leakyrelu": [40, 312], "convert_nan_to_random": [41, 313], "convert_placeholder_to_const": [42, 314], "core": [284, 405, 423], "coven": [490, 491], "cpu": [489, 494, 534, 555], "creat": 491, "create_obj_from_config": 455, "criteria": [169, 491, 544, 554], "criterion": 163, "custom": [523, 537, 554], "data": [200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 387, 496, 497, 527], "dataload": [200, 201, 202, 203, 204, 205, 206, 207, 208, 523], "dataset": [209, 210, 211, 212, 213, 214, 215, 216, 532], "decai": 544, "default_dataload": 202, "defin": [497, 526], "demo": [472, 483], "dens": 293, "depend": 489, "deploy": 544, "depthwise_conv2d": 294, "dequantize_cast_optim": [36, 308], "design": [485, 496, 524, 554], "detail": 492, "determin": [480, 552], "devic": [481, 489, 546], "differ": 551, "dilated_contract": [43, 315], "direct_q8": 11, "distil": [163, 164, 165, 166, 525, 538, 555], "distribut": [526, 554], "docker": [494, 534], "document": [469, 494, 503, 556], "driven": [474, 539], "dummy_biasadd": [44, 316], "dummy_dataset": 212, "dummy_dataset_v2": 213, "dump": 483, "dure": [539, 548], "dynam": [471, 488, 492, 546], "each": 552, "effici": 477, "embed_layernorm": 12, "enforc": 490, "engin": [534, 552], "enhanc": 552, "entir": 552, "environ": [446, 534], "evalu": 526, "evaluate_squad": 231, "event": [494, 545], "exampl": [471, 472, 473, 474, 476, 477, 479, 480, 488, 489, 492, 495, 503, 520, 523, 525, 526, 527, 528, 537, 538, 539, 540, 541, 542, 543, 544, 546, 547, 549, 552, 555], "except": [138, 397], "execut": 526, "exhaust": [268, 554], "exit": 554, "expanddims_optim": [45, 317], "export": [434, 435, 456, 457, 458, 459, 528, 549], "f1": 232, "face": 532, "factori": [247, 248, 252, 254, 255, 259], "fake_quant": 98, "fast_bias_correct": 147, "featur": [526, 530, 531, 533, 546], "fetch_weight_from_reshap": [46, 318], "fetcher": 203, "few": 497, "file": 530, "filter": [217, 218, 219], "fix": [475, 480, 552], "flow": [495, 546], "fold_batch_norm": [47, 319], "fold_const": [48, 320], "folder": 522, "fp16": [474, 539], "fp32": [496, 528], "fp8": [472, 494], "fp8_quant": 393, "framework": [475, 494, 496, 523, 528, 530, 534, 540, 541, 547, 549, 552], "free": 544, "freeze_fake_qu": [73, 345], "freeze_valu": [74, 346], "freeze_value_without_calib": 75, "frequent": 529, "from": [494, 496, 534, 538], "full": 545, "function": [1, 3, 21, 29, 30, 31, 52, 53, 55, 89, 90, 101, 103, 133, 134, 135, 140, 141, 144, 145, 146, 151, 152, 153, 154, 160, 161, 163, 165, 166, 169, 170, 171, 173, 175, 176, 180, 183, 188, 189, 190, 191, 192, 194, 201, 202, 209, 211, 218, 224, 225, 227, 228, 230, 231, 232, 234, 235, 239, 243, 245, 257, 261, 262, 266, 274, 278, 280, 281, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 324, 325, 327, 385, 387, 390, 391, 396, 398, 404, 406, 408, 409, 411, 412, 413, 415, 417, 418, 420, 425, 427, 431, 433, 435, 
437, 438, 439, 441, 442, 443, 444, 446, 448, 449, 455, 457, 458, 459, 462, 463, 465, 466, 526], "fundament": [488, 546, 552], "fuse_biasadd_add": [49, 321], "fuse_column_wise_mul": [50, 322], "fuse_conv_redundant_dequant": [76, 347], "fuse_conv_requant": [77, 348], "fuse_conv_with_math": [51, 323], "fuse_decomposed_bn": [52, 324], "fuse_decomposed_in": [53, 325], "fuse_gelu": [54, 326], "fuse_layer_norm": [55, 327], "fuse_matmul_redundant_dequant": [78, 349], "fuse_matmul_requant": [79, 350], "fuse_pad_with_conv": [56, 328], "fuse_pad_with_fp32_conv": [57, 329], "fuse_qdq_bn": [108, 363], "fuse_qdq_concatv2": [109, 364], "fuse_qdq_conv": [110, 365], "fuse_qdq_deconv": [111, 366], "fuse_qdq_in": [112, 367], "fuse_qdq_matmul": [113, 368], "fuse_qdq_pool": [114, 369], "fuse_reshape_transpos": [58, 330], "fx": 492, "gather": 13, "gaudi": 486, "gavgpool": 14, "gemm": 15, "gener": [38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 483, 496], "get": [471, 472, 473, 474, 476, 477, 479, 484, 492, 494, 495, 520, 523, 525, 526, 530, 531, 537, 539, 541, 542, 543, 544, 546], "gptq": [420, 477], "gpu": [489, 494, 534], "graph": 496, "graph_bas": [71, 343], "graph_convert": [33, 306], "graph_converter_without_calib": 34, "graph_cse_optim": [59, 331], "graph_rewrit": [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359], "graph_transform_bas": [129, 381], "graph_util": [95, 360], "grappler_pass": [60, 332], "guid": 470, "guidelin": 491, "half_precision_convert": 399, "half_precision_rewrit": 406, "hardwar": [474, 534, 539, 555], "hawq_metr": 135, "hawq_v2": [269, 554], "heterogen": 534, "horovodrun": 526, "how": [482, 496, 497], "hpo": [167, 168], "hpu": [494, 534], "hqq": [421, 422, 423, 424, 425, 426, 427, 477], "hyperparamet": 544, "imag": [494, 534], "imagenet_dataset": 214, "imagenet_transform": 221, "implement": [495, 496], "import": 522, "inc": 522, "incompat": [532, 550], "infer": 526, "inform": 535, "infrastructur": 533, "insert_log": [131, 383], "insert_print_nod": [62, 334], "insert_qdq_pattern": [92, 357], "instal": [494, 534], "int4": 497, "int8": [73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 528, 555], "intel": [469, 486, 489, 494, 523, 533, 534, 537, 538, 556], "intel_extension_for_pytorch": [494, 534], "interfac": 522, "intern": 522, "introduct": [471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 483, 484, 488, 489, 492, 495, 496, 497, 520, 521, 523, 525, 526, 528, 530, 533, 537, 539, 540, 541, 542, 543, 544, 546, 547, 549, 551, 552, 553, 554], "invok": 497, "ipex": [476, 488, 546, 555], "issu": [529, 550], "iter": 496, "json": 522, "kera": [288, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 555], "keras_model": 238, "kernel": 497, "kit": 534, "kl_diverg": 461, "knowledg": 555, "known": 550, "languag": [494, 536, 544], "larg": [494, 
536, 544], "latenc": 483, "layer": [292, 293, 294, 295, 296, 297, 298, 477, 547, 552], "layer_initi": 296, "layer_wis": [395, 396, 397, 398], "layer_wise_qu": [137, 138, 139, 140, 141], "legal": 535, "licens": 535, "like": [478, 489, 497], "limit": [488, 552], "line": 497, "list": [496, 527, 553], "llm": [494, 536], "load": [396, 477, 478, 494], "load_entri": 441, "load_huggingfac": 462, "logger": [159, 393, 463, 522], "lstm": 17, "lwq": 547, "map": 486, "matmul": [18, 488, 552], "matrix": [474, 475, 477, 478, 481, 483, 492, 495, 520, 521, 523, 525, 526, 528, 530, 531, 533, 537, 539, 540, 541, 542, 543, 544, 546, 547, 549, 552], "maxpool": 19, "merge_duplicated_qdq": [93, 358], "meta_op_optim": [81, 352], "metric": [227, 228, 229, 230, 231, 232, 233, 234, 532, 537], "mha": [177, 184], "microsc": [473, 541], "migrat": 538, "mix": [474, 481, 508, 538, 539, 548], "mix_precis": 235, "mixed_precis": [399, 400, 401], "mme": 534, "mode": [492, 555], "model": [236, 237, 238, 239, 240, 241, 242, 243, 244, 389, 475, 476, 482, 494, 496, 509, 528, 534, 536, 538, 540, 541, 544, 547, 549, 552, 555], "model_slim": [171, 172, 173, 174], "model_wrapp": [142, 390], "modified_pickl": [138, 397], "modul": [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 81, 82, 83, 84, 85, 87, 88, 89, 90, 92, 93, 94, 95, 98, 100, 101, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 131, 132, 133, 134, 135, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149, 150, 151, 152, 153, 154, 156, 157, 159, 160, 161, 162, 163, 165, 166, 169, 171, 173, 174, 175, 177, 178, 179, 180, 181, 182, 184, 185, 186, 187, 188, 189, 190, 191, 192, 194, 195, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 221, 223, 224, 225, 227, 228, 230, 231, 232, 234, 235, 236, 238, 239, 240, 241, 242, 243, 244, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 271, 272, 273, 274, 277, 278, 279, 280, 281, 283, 284, 286, 288, 289, 292, 293, 294, 297, 298, 299, 301, 302, 303, 305, 306, 307, 308, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 345, 346, 347, 348, 349, 350, 352, 353, 354, 355, 357, 358, 359, 360, 363, 364, 365, 366, 367, 368, 369, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 383, 384, 385, 387, 389, 390, 391, 392, 396, 397, 398, 399, 401, 403, 404, 405, 406, 408, 409, 411, 412, 413, 415, 416, 417, 418, 419, 420, 421, 422, 423, 425, 426, 427, 429, 430, 431, 432, 433, 435, 437, 438, 439, 441, 442, 443, 444, 445, 446, 448, 449, 452, 453, 455, 457, 458, 459, 461, 462, 463, 464, 465, 466, 467], "module_wrapp": 401, "move_squeeze_after_relu": [63, 335], "mse": [271, 554], "mse_v2": [272, 554], "multipl": [534, 542, 555], "mx": 403, "mx_quant": [402, 403, 404], "mxnet": [537, 546, 553], "mxnet_dataload": 204, "mxnet_model": 240, "mxnet_util": [0, 1], "need": 496, "nets_factori": 241, "network": 544, "neural": [469, 486, 492, 494, 523, 533, 534, 537, 538, 544, 551, 556], "neural_compressor": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468], "new": [494, 495, 497, 554], "ninm": 178, "norm": 20, "note": [492, 550], "nxm": 179, "object": [245, 510, 542], "off": 548, "onli": [477, 488, 494, 549], "onnx": [86, 87, 88, 89, 90, 499, 534, 546, 547, 555], "onnx_graph": 87, "onnx_model": 242, "onnx_nod": 88, "onnx_pars": [248, 249], "onnx_schema": 89, "onnxrt": [537, 553], "onnxrt_dataload": 205, "onnxrt_profil": [255, 256, 257], "onnxrtadaptor": 495, "op": [21, 528], "oper": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 496, 497], "optim": [165, 425, 543, 544], "optimize_lay": 103, "optimize_qdq": [116, 371], "option": [464, 526], "orchestr": [538, 543], "other": [494, 534], "our": [490, 552], "overview": [470, 482, 491], "ox_util": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], "packag": [170, 176, 183, 489], "pad": 22, "paramet": 472, "parser": [247, 248, 249, 250, 251, 252, 253], "pattern": [175, 176, 177, 178, 179, 544], "pattern_analyz": 173, "pattern_detector": 143, "pattern_lock": 185, "per": [488, 552], "perform": 551, "platform": [494, 534, 551], "pledg": 490, "polici": [493, 554], "pool": 23, "pool2d": 297, "post": [481, 492, 538, 546], "post_hostconst_convert": [82, 353], "post_quantized_op_cs": [83, 354], "postprocess": 223, "pre_optim": [64, 336], "precis": [474, 481, 508, 538, 539, 548], "prepar": [489, 496, 551], 
"prerequisit": 534, "problem": [478, 492], "process": 554, "processor": 534, "profil": [246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261], "progress": 186, "prune": [188, 538, 544, 555], "pruner": [169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194], "pt2e": 476, "pt2e_export": 435, "pt2e_quant": [405, 406, 407, 408, 409], "ptq": 555, "public": [494, 522, 545], "pull": 491, "pure": 526, "pypi": 494, "python": [470, 523, 537], "pytorch": [465, 474, 475, 476, 477, 482, 488, 516, 517, 518, 526, 528, 537, 546, 547, 548, 553, 555], "pytorch_dataload": 206, "qat": [98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 555], "qdq": [91, 92, 93, 94, 108, 109, 110, 111, 112, 113, 114, 115, 116, 356, 357, 358, 359, 363, 364, 365, 366, 367, 368, 369, 370, 371, 555], "qlinear2qdq": 457, "qtensor": 426, "quant": [480, 552], "quantiz": [28, 139, 262, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 427, 437, 438, 439, 440, 441, 442, 450, 471, 472, 473, 475, 476, 477, 478, 479, 481, 484, 488, 492, 494, 496, 497, 511, 513, 514, 515, 516, 517, 518, 521, 525, 528, 534, 536, 538, 539, 541, 546, 547, 548, 549, 552, 555], "quantization_config": 452, "quantize_config": 100, "quantize_graph": [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378], "quantize_graph_bas": [117, 372], "quantize_graph_bn": [118, 373], "quantize_graph_common": [124, 379], "quantize_graph_concatv2": [119, 374], "quantize_graph_conv": [120, 375], "quantize_graph_for_intel_cpu": [121, 376], "quantize_graph_matmul": [122, 377], "quantize_graph_pool": [123, 378], "quantize_help": 101, "quantize_lay": [102, 103, 104, 105, 106], "quantize_layer_add": 104, "quantize_layer_bas": 105, "quantize_layer_bn": 106, "quantize_wrapp": 107, "queri": 495, "query_fw_cap": 496, "question": 529, "quick": 531, "random": [273, 554], "rang": 496, "recip": [536, 546], "recommend": 522, "reduc": 24, "refer": [473, 477, 488, 521, 522, 541, 544, 546, 549, 552], "reg": 189, "regular": 544, "releas": [527, 550], "remove_training_nod": [65, 337], "rename_batch_norm": [66, 338], "report": 493, "request": [474, 491, 539], "requir": 534, "rerange_quantized_concat": [132, 384], "resiz": 25, "respons": 490, "result": 251, "retrain": 544, "retrain_fre": 187, "rnn_convert": 84, "rtn": [430, 477], "rule": [475, 476, 477, 479, 522, 546], "run": 496, "runtim": [499, 534, 546, 547, 555], "sa_optim": 168, "sampl": [476, 496, 531], "sampler": 207, "save": 477, "save_load": [160, 408, 411, 415, 431], "scale_propag": [85, 355], "scaler": 286, "schedul": [190, 544], "scheme": [481, 488, 546], "scope": [490, 544], "section": [469, 556], "secur": [493, 526], "select": 494, "separable_conv2d": 298, "set": 522, "share_qdq_y_pattern": [94, 359], "shot": 543, "side": 477, "sigopt": [198, 551, 554], "singl": 542, "smooth": [475, 480, 481, 488, 552], "smooth_quant": [29, 149, 410, 411, 412, 413], "smooth_quant_calibr": 125, "smooth_quant_scal": 126, 
"smoother": [283, 284, 285, 286], "smoothquant": 552, "softwar": [474, 486, 534, 539], "sourc": 534, "space": 554, "spars": 544, "sparsiti": 544, "specif": 497, "specifi": [475, 476, 477, 479, 546], "split": 26, "split_shared_input": [67, 339], "stack": 486, "standard": 490, "start": [471, 472, 473, 474, 476, 477, 479, 484, 492, 494, 495, 520, 523, 525, 526, 530, 531, 537, 539, 541, 542, 543, 544, 546], "static": [476, 481, 488, 492, 546], "static_qu": [287, 288, 289, 414, 415, 416, 417], "statu": 491, "step": 491, "strategi": [197, 198, 199, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 512, 551, 554], "string": 522, "strip_equivalent_nod": [68, 340], "strip_unused_nod": [69, 341], "structur": 522, "style_transfer_dataset": 216, "submodul": [0, 4, 16, 37, 61, 72, 80, 86, 91, 96, 97, 99, 102, 115, 130, 136, 137, 148, 155, 158, 164, 167, 170, 172, 176, 183, 193, 197, 215, 219, 222, 226, 233, 237, 270, 276, 285, 287, 295, 300, 304, 309, 333, 344, 351, 356, 361, 362, 370, 382, 388, 394, 395, 400, 402, 407, 410, 414, 424, 428, 434, 440, 447, 451, 456, 460], "subpackag": [4, 72, 96, 97, 99, 136, 155, 170, 196, 220, 226, 270, 282, 290, 291, 304, 344, 361, 362, 394, 428, 436, 460], "summari": [483, 497], "support": [472, 474, 475, 477, 478, 481, 483, 489, 491, 492, 495, 497, 520, 521, 523, 525, 526, 528, 530, 533, 534, 537, 539, 540, 541, 542, 543, 544, 546, 547, 549, 552, 553], "switch_optim": [70, 342], "symbolic_trac": 144, "symmetr": 488, "system": 534, "templat": [281, 491], "tensor": [488, 552], "tensorflow": [32, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 479, 481, 482, 488, 494, 513, 514, 515, 526, 528, 534, 537, 546, 548, 553, 555], "tensorflow_dataload": 208, "tensorflow_model": 243, "tensorflow_pars": [252, 253], "tensorflow_profil": [259, 260, 261], "teq": [432, 477], "tf2onnx": 458, "tf2onnx_convert": 127, "tf2onnx_util": 90, "tf_criteria": 191, "tf_util": [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133], "through": [480, 534, 552, 555], "throughput": 483, "todo": 522, "token": 224, "topic": 470, "torch": [392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 478, 494, 500, 534, 555], "torch2onnx": 459, "torch_load": 140, "torch_model": 244, "torch_util": [134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145], "tpc": 534, "tpe": [199, 554], "trademark": 535, "train": [449, 481, 488, 492, 519, 526, 538, 544, 546], "transform": 
[221, 222, 223, 224, 225, 450, 451, 452, 489, 532, 553], "transform_graph": [128, 129, 130, 131, 132, 380, 381, 382, 383, 384], "tune": [479, 480, 488, 497, 546, 549, 552, 554], "tune_cfg": 496, "tuning_param": 156, "tuning_sampl": 277, "tuning_spac": 278, "tuning_struct": 279, "turn": 548, "two": 534, "type": [497, 522, 544], "unary_op": 27, "us": [480, 483, 494, 497, 523, 534, 537, 552], "usag": [472, 475, 476, 477, 480, 483, 489, 552, 554], "user": [470, 526, 532, 549], "util": [1, 30, 133, 141, 145, 157, 158, 159, 160, 161, 166, 192, 194, 257, 261, 275, 276, 277, 278, 279, 280, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 393, 398, 404, 409, 413, 417, 433, 443, 444, 445, 446, 447, 448, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 500], "v": 522, "v1": 532, "valid": [475, 534, 552, 555], "vendor": 534, "version": [468, 486], "vulner": 493, "wanda": [193, 194], "weight": [477, 488, 494, 549], "weight_correct": 150, "weight_onli": [31, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433], "weight_slim": 174, "weights_detail": 467, "what": 494, "wise": [477, 547], "without": 479, "woq": 549, "work": [482, 495, 546], "workflow": [485, 524], "x": [470, 538], "xe": 534, "yaml": [496, 526, 530]}}) \ No newline at end of file